[Snippet][Java] Use PC Keyboard on phone using ADB - Galaxy S 4 Developer Discussion [Developers-Only]

I always get kind of annoyed when I have to write a lot of text on my phone; the physical keyboard of a PC is much better, of course. So I decided to make a script that converts your key presses into direct input on your phone. It runs an adb command for each character you type, so as you can imagine the device gets spammed when you type too fast. Also, not all keys are defined yet. I did:
-- A-Z
-- 0-9
-- "space", "backspace" and "enter"
It's not even 50% finished, but I'm just sharing this snippet.
KeyEventDemo.java
(this is a native Java app (.jar))
Code:
/*
* Copyright (c) 1995, 2008, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Oracle or the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Created by broodplank
package main;
/*
* KeyEventDemo
*/
import java.awt.BorderLayout;
import java.awt.Container;
import java.awt.Dimension;
import java.awt.event.*;
import java.io.Console;
import java.io.IOException;
import javax.swing.*;
public class KeyEventDemo extends JFrame
implements KeyListener,
ActionListener
{
JTextArea displayArea;
JTextField typingArea;
static final String newline = System.getProperty("line.separator");
public static void main(String[] args) {
/* Use an appropriate Look and Feel */
try {
UIManager.setLookAndFeel("javax.swing.plaf.metal.MetalLookAndFeel");
} catch (UnsupportedLookAndFeelException ex) {
ex.printStackTrace();
} catch (IllegalAccessException ex) {
ex.printStackTrace();
} catch (InstantiationException ex) {
ex.printStackTrace();
} catch (ClassNotFoundException ex) {
ex.printStackTrace();
}
UIManager.put("swing.boldMetal", Boolean.FALSE);
javax.swing.SwingUtilities.invokeLater(new Runnable() {
public void run() {
createAndShowGUI();
}
});
}
/**
* Create the GUI and show it. For thread safety,
* this method should be invoked from the
* event-dispatching thread.
*/
private static void createAndShowGUI() {
//Create and set up the window.
KeyEventDemo frame = new KeyEventDemo("CapKeyToAndroid");
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
//Set up the content pane.
frame.addComponentsToPane();
//Display the window.
frame.pack();
frame.setVisible(true);
}
private void addComponentsToPane() {
JButton button = new JButton("Clear");
button.addActionListener(this);
typingArea = new JTextField(20);
typingArea.addKeyListener(this);
//Uncomment this if you wish to turn off focus
//traversal. The focus subsystem consumes
//focus traversal keys, such as Tab and Shift Tab.
//If you uncomment the following line of code, this
//disables focus traversal and the Tab events will
//become available to the key event listener.
//typingArea.setFocusTraversalKeysEnabled(false);
displayArea = new JTextArea();
displayArea.setEditable(false);
JScrollPane scrollPane = new JScrollPane(displayArea);
scrollPane.setPreferredSize(new Dimension(375, 125));
getContentPane().add(typingArea, BorderLayout.PAGE_START);
getContentPane().add(scrollPane, BorderLayout.CENTER);
getContentPane().add(button, BorderLayout.PAGE_END);
}
public KeyEventDemo(String name) {
super(name);
}
/** Handle the key pressed event from the text field. */
public void keyPressed(KeyEvent e) {
try {
int keyCode = e.getKeyCode();
System.out.println(keyCode);
// A-Z: AWT key codes 65-90 map to Android key codes 29-54 (KEYCODE_A..KEYCODE_Z), hence the -36 offset.
if (keyCode > 64 && keyCode < 91) {
Runtime.getRuntime().exec("adb shell input keyevent " + (keyCode - 36));
}
// 0-9: AWT key codes 48-57 map to Android key codes 7-16 (KEYCODE_0..KEYCODE_9), hence the -41 offset.
if (keyCode > 47 && keyCode < 58) {
Runtime.getRuntime().exec("adb shell input keyevent " + (keyCode - 41));
}
// Backspace -> KEYCODE_DEL
if (keyCode == 8) {
Runtime.getRuntime().exec("adb shell input keyevent 67");
}
// Enter -> KEYCODE_ENTER
if (keyCode == 10) {
Runtime.getRuntime().exec("adb shell input keyevent 66");
}
// Space -> KEYCODE_SPACE
if (keyCode == 32) {
Runtime.getRuntime().exec("adb shell input keyevent 62");
}
} catch (IOException e1) {
e1.printStackTrace();
}
}
/** Handle the button click. */
public void actionPerformed(ActionEvent e) {
//Clear the text components.
displayArea.setText("");
typingArea.setText("");
//Return the focus to the typing area.
typingArea.requestFocusInWindow();
}
@Override
public void keyTyped(KeyEvent e) {
// TODO Auto-generated method stub
}
@Override
public void keyReleased(KeyEvent e) {
// TODO Auto-generated method stub
}
}
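Since the snippet spawns a separate adb process per keystroke (the spam mentioned above), one possible improvement is to buffer typed characters briefly and flush them with a single `input text` call, which modern Android builds support. A minimal, untested sketch (the helper is hypothetical; assumes adb is on the PATH):
Code:
// Hypothetical helper: buffer typed characters, then send them in one adb call.
private final StringBuilder buffer = new StringBuilder();

private void flushBuffer() throws IOException {
    if (buffer.length() == 0) return;
    // `input text` treats %s as a space, so escape literal spaces.
    String text = buffer.toString().replace(" ", "%s");
    Runtime.getRuntime().exec("adb shell input text " + text);
    buffer.setLength(0);
}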

Why make it difficult? Why not OTG with a USB keyboard?
It's a good idea, but I'm unsure why it would be needed when it can be done directly on the device.
Sent from my I9505ro.product.brand=samsung using xda app-developers app

JustSueMe said:
Why make it difficult? Why not OTG with a USB keyboard?
It's a good idea, but I'm unsure why it would be needed when it can be done directly on the device.
Sent from my I9505ro.product.brand=samsung using xda app-developers app
Or better yet, just use a BT keyboard and be done.

Take a look at this. Works well, I use it almost daily.
https://sites.google.com/site/droidskm/

FrozenDragoon said:
Take a look at this. Works well, I use it almost daily.
https://sites.google.com/site/droidskm/
Eww windows program. Shoo shoo.
d[-_-]b
Sent from my GT-I9505 using xda app-developers app

OTG is better of course, I agree; I'm just sharing some snippets now and then that might be useful for developers.

Related

Obtaining Authentication Information for Huawei Drive Kit

Obtaining Authentication Information
Use Case
A HUAWEI ID is required for a user to access and manage their files in Drive. Before your app can use the Drive SDK, it needs to prompt the user to sign in with their HUAWEI ID. After the user signs in, your app saves relevant user information for calling Drive SDK APIs later.
Service Process
The figure below shows the basic service process.
Development Procedure
Step 1 Integrate the HMS SDK.
To use the Drive SDK, your app needs to obtain user information through the HMS SDK first. For details, please refer to Integrating the Drive SDK and HMS SDK.
Step 2 Set the HMS scope.
Set the Drive scope by calling the HuaweiIdAuthParamsHelper.setScopeList API provided by the HMS SDK to obtain the permission to access Drive APIs.
Code:
List<Scope> scopeList = new LinkedList<>();
scopeList.add(HuaweiIdAuthAPIManager.HUAWEIID_BASE_SCOPE);// Basic account permissions
scopeList.add(new Scope(DriveScopes.SCOPE_DRIVE));// All permissions, except permissions for the app folder.
scopeList.add(new Scope(DriveScopes.SCOPE_DRIVE_FILE));// Permissions to view and manage files.
scopeList.add(new Scope(DriveScopes.SCOPE_DRIVE_METADATA));// Permissions to view and manage file metadata, excluding file content.
scopeList.add(new Scope(DriveScopes.SCOPE_DRIVE_METADATA_READONLY));// Permissions only for viewing file metadata, excluding file entities.
scopeList.add(new Scope(DriveScopes.SCOPE_DRIVE_READONLY));// Permissions to view file metadata and content.
scopeList.add(new Scope(DriveScopes.SCOPE_DRIVE_APPDATA));// Permissions to view and manage app files.
HuaweiIdAuthParams params = new HuaweiIdAuthParamsHelper(HuaweiIdAuthParams.DEFAULT_AUTH_REQUEST_PARAM)
.setAccessToken()
.setIdToken()
.setScopeList(scopeList)
.createParams();
The access permissions vary according to the Drive scope. You may apply for the permissions as needed. For details, please refer to Signing In with HUAWEI ID.
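For reference, a minimal sketch of how these params are typically used to launch the HUAWEI ID sign-in flow (the request code is arbitrary and chosen by the app):
Code:
private static final int REQUEST_SIGN_IN = 8888;

private void signIn() {
    // Build the sign-in service from the params above and open the sign-in UI.
    HuaweiIdAuthService authService = HuaweiIdAuthManager.getService(this, params);
    startActivityForResult(authService.getSignInIntent(), REQUEST_SIGN_IN);
}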
Step 3 Store authentication information.
After obtaining the user information, save the unionId and AccessToken of the user for calling the Drive SDK whenever needed. The following provides an example implementation method. You may use another implementation method if necessary.
Sample code:
Create the CredentialManager class.
Code:
/**
* Credential management class
* <p>
* since 1.0
*/
public class CredentialManager {
/**
* Drive authentication information.
*/
private DriveCredential mCredential;
private CredentialManager() {
}
private static class InnerHolder {
private static CredentialManager sInstance = new CredentialManager();
}
/**
* Singleton CredentialManager instance.
*
* @return CredentialManager
*/
public static CredentialManager getInstance() {
return InnerHolder.sInstance;
}
/**
* Initialize the Drive credential based on HUAWEI ID information, including the unionId and accessToken.
* When the current accessToken expires, the registered AccessMethod callback obtains a new accessToken.
*
* @param unionID unionID in HwID
* @param at access token
* @param refreshAT Callback function for refreshing the access token.
*/
public int init(String unionID, String at, DriveCredential.AccessMethod refreshAT) {
if (StringUtils.isNullOrEmpty(unionID) || StringUtils.isNullOrEmpty(at)) {
return DriveCode.ERROR;
}
DriveCredential.Builder builder = new DriveCredential.Builder(unionID, refreshAT);
mCredential = builder.build().setAccessToken(at);
return DriveCode.SUCCESS;
}
/**
* Obtain DriveCredential.
*
* @return DriveCredential
*/
public DriveCredential getCredential() {
return mCredential;
}
/**
* Exit Drive and clear all cache information generated during use.
*/
public void exit(Context context) {
// Clear cache files.
deleteFile(context.getCacheDir());
deleteFile(context.getFilesDir());
}
/**
* Delete cache files.
*
* @param file Designated cache file
*/
private static void deleteFile(java.io.File file) {
if (null == file || !file.exists()) {
return;
}
if (file.isDirectory()) {
java.io.File[] files = file.listFiles();
if (files != null) {
for (java.io.File f : files) {
deleteFile(f);
}
}
}
// Delete the file itself (a directory is deleted after its contents).
file.delete();
}
}
Call initDrive in the onActivityResult callback after the user signs in.
Code:
private static DriveCredential.AccessMethod refreshAT = new DriveCredential.AccessMethod() {
@Override
public String refreshToken() {
// Obtain a new access token through the HMS SDK.
return refreshAccessToken();
}
};
private void initDrive() {
// signInHuaweiId is the HUAWEI ID that the user used to sign in. Based on signInHuaweiId, obtain unionId and AccessToken.
final String unionID = signInHuaweiId.getUnionId();
final String accessToken = signInHuaweiId.getAccessToken();
int returnCode = CredentialManager.getInstance().init(unionID, accessToken, refreshAT);
if (DriveCode.SUCCESS == returnCode) {
//Jump to the app home page after successful initialization.
jumpToMainActivity();
}
}
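Once init returns DriveCode.SUCCESS, the saved credential can be used to build a Drive client for subsequent API calls. A minimal sketch, assuming the Drive SDK's Drive.Builder takes the credential and a context (verify against the SDK reference):
Code:
private Drive buildDrive(Context context) {
    // Hedged sketch: construct the Drive client from the stored credential.
    return new Drive.Builder(CredentialManager.getInstance().getCredential(), context).build();
}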

Android: How to Develop an ID Photo DIY Applet in 30 Min

The source code will be shared, and it's quite friendly to users who are new to Android. The whole process will take only 30 minutes.
This is application-level development, so we won't go through the image segmentation algorithm itself. I used Huawei ML Kit, which provides the image segmentation capability, to help develop this app. Developers will learn how to quickly develop an ID photo DIY applet using this SDK.
Background
I don't know if you have had such an experience. All of a sudden, your school or company needs one-inch or two-inch head photos of individuals, for example to apply for a passport or student card, and there are requirements for the background color of the photos. However, many people don't have time to take photos at a photo studio, or they have taken them before but the background color doesn't meet the requirements. I had a similar experience: the school asked for a passport photo while the school photo studio was closed, so I took photos with my mobile phone in a hurry, using the bedspread as the background. As a result, I was scolded by the teacher.
Many years later, ML Kit machine learning offers an image segmentation function. Using this SDK to develop a small ID photo DIY applet would have perfectly solved that embarrassment.
Here is the demo for the result.
How effective is it? Pretty great, and it only takes a small, quickly written program to achieve!
Core Tip: This SDK is free, and all Android models are covered!
ID Photo Development in Practice
1. Preparation
1.1 Add the Huawei Maven Repository in the Project-Level Gradle File
Open the Android studio project level build.gradle file.
Add the following Maven addresses:
Code:
buildscript {
    repositories {
        maven { url 'http://developer.huawei.com/repo/' }
    }
}

allprojects {
    repositories {
        maven { url 'http://developer.huawei.com/repo/' }
    }
}
1.2 Add SDK Dependency in Application Level build.gradle
Introduce the SDK and the basic SDK of face recognition:
Code:
dependencies {
    implementation 'com.huawei.hms:ml-computer-vision:1.0.2.300'
    implementation 'com.huawei.hms:ml-computer-vision-image-segmentation-body-model:1.0.2.301'
}
1.3 Add the Model in the AndroidManifest.xml File
To enable the application to automatically update the latest machine learning model to the user's device after the user installs your application from the Huawei application market, add the following statement to the AndroidManifest.xml file of the application:
Code:
<manifest ...>
    <application ...>
        <meta-data
            android:name="com.huawei.hms.ml.DEPENDENCY"
            android:value="imgseg" />
    </application>
</manifest>
1.4 Apply for Camera and Storage Permissions in the AndroidManifest.xml File
Code:
<!-- Storage permission -->
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
2. Two Key Steps of Code Development
2.1 Dynamic Permission Application
Code:
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
if (!allPermissionsGranted()) {
getRuntimePermissions();
}
}

@Override
public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions,
@NonNull int[] grantResults) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults);
if (requestCode != PERMISSION_REQUESTS) {
return;
}
boolean isNeedShowDiag = false;
for (int i = 0; i < permissions.length; i++) {
if (permissions[i].equals(Manifest.permission.READ_EXTERNAL_STORAGE) && grantResults[i] != PackageManager.PERMISSION_GRANTED) {
isNeedShowDiag = true;
}
}
if (isNeedShowDiag && !ActivityCompat.shouldShowRequestPermissionRationale(this, Manifest.permission.CALL_PHONE)) {
AlertDialog dialog = new AlertDialog.Builder(this)
.setMessage(getString(R.string.camera_permission_rationale))
.setPositiveButton(getString(R.string.settings), new DialogInterface.OnClickListener() {
@Override public void onClick(DialogInterface dialog, int which) {
// Open this app's details page in system settings, by package name.
Intent intent = new Intent(Settings.ACTION_APPLICATION_DETAILS_SETTINGS);
intent.setData(Uri.parse("package:" + getPackageName()));
startActivityForResult(intent, 200);
}
})
.setNegativeButton(getString(R.string.cancel), new DialogInterface.OnClickListener() {
@Override public void onClick(DialogInterface dialog, int which) {
finish();
}
}).create();
dialog.show();
}
}
2.2 Creating an Image Segmentation Detector
The image segmentation detector can be created through the image segmentation detection configurator "MLImageSegmentationSetting".
Code:
MLImageSegmentationSetting setting = new MLImageSegmentationSetting.Factory()
    .setAnalyzerType(MLImageSegmentationSetting.BODY_SEG)
    .setExact(true)
    .create();
this.analyzer = MLAnalyzerFactory.getInstance().getImageSegmentationAnalyzer(setting);
2.3 Create an "MLFrame" Object Through android.graphics.Bitmap for the Analyzer to Detect Pictures
Create an "MLFrame" object based on the bitmap to be detected:
Code:
MLFrame mlFrame = new MLFrame.Creator().setBitmap(this.originBitmap).create();
2.4 Call the "asyncAnalyseFrame" Method for Image Segmentation
Code:
// Create a task to process the result returned by the image segmentation detector asynchronously.
Task<MLImageSegmentation> task = this.analyzer.asyncAnalyseFrame(mlFrame);
task.addOnSuccessListener(new OnSuccessListener<MLImageSegmentation>() {
@Override public void onSuccess(MLImageSegmentation mlImageSegmentationResults) {
// Transacting logic for segment success.
if (mlImageSegmentationResults != null) {
StillCutPhotoActivity.this.foreground = mlImageSegmentationResults.getForeground();
StillCutPhotoActivity.this.preview.setImageBitmap(StillCutPhotoActivity.this.foreground);
StillCutPhotoActivity.this.processedImage = ((BitmapDrawable) ((ImageView) StillCutPhotoActivity.this.preview).getDrawable()).getBitmap();
StillCutPhotoActivity.this.changeBackground();
} else {
StillCutPhotoActivity.this.displayFailure();
}
}
}).addOnFailureListener(new OnFailureListener() {
@Override public void onFailure(Exception e) {
// Transacting logic for segment failure.
StillCutPhotoActivity.this.displayFailure();
return;
}
});
2.5 Change the Picture Background
Code:
this.backgroundBitmap = BitmapUtils.loadFromPath(StillCutPhotoActivity.this, id, targetedSize.first, targetedSize.second);
BitmapDrawable drawable = new BitmapDrawable(backgroundBitmap);
this.preview.setDrawingCacheEnabled(true);
this.preview.setBackground(drawable);
this.preview.setImageBitmap(this.foreground);
this.processedImage = Bitmap.createBitmap(this.preview.getDrawingCache());
this.preview.setDrawingCacheEnabled(false);
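To export the composited photo, the processed bitmap still needs to be written to storage. A minimal sketch inside the activity, using standard Android APIs (the file name and directory are illustrative):
Code:
private void saveProcessedImage(Bitmap bitmap) throws IOException {
    // Save as JPEG in the app-private pictures directory; no runtime permission needed there.
    File dir = getExternalFilesDir(Environment.DIRECTORY_PICTURES);
    File out = new File(dir, "id_photo_" + System.currentTimeMillis() + ".jpg");
    try (FileOutputStream fos = new FileOutputStream(out)) {
        bitmap.compress(Bitmap.CompressFormat.JPEG, 100, fos);
    }
}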
Conclusion
In this way, a small program of ID photo DIY has been made. Let's see the demo.
If you have strong hands-on skills, you can also add features such as changing suits or other operations. The source code has been uploaded to GitHub, and you can improve this function on GitHub as well.
https://github.com/HMS-Core/hms-ml-demo/tree/master/ID-Photo-DIY
See the GitHub source code address above (the project directory is ID-Photo-DIY).
Based on the image segmentation capability, you can not only build the ID photo DIY applet but also implement the following related functions:
1. People's portraits in daily life can be cut out, some interesting photos can be made by changing the background, or the background can be virtualized to get more beautiful and artistic photos.
2. Identify the sky, plants, food, cats and dogs, flowers, water surface, sand surface, buildings, mountains and other elements in the image, and make special beautification for these elements, such as making the sky bluer and the water clearer.
3. Identify the objects in the video stream, edit the special effects of the video stream, and change the background.
For other functions, please brainstorm together!
For a more detailed development guide, please refer to the official website of Huawei developer Alliance:
https://developer.huawei.com/consumer/en/doc/development/HMS-Guides/ml-introduction-4

Make your music player with HMS Audio Kit: Part 2

For more information like this, you can visit the HUAWEI Developer Forum.​
Original article link: https://forums.developer.huawei.com/forumPortal/en/topicview?tid=0202327658275700021&fid=0101187876626530001
If you followed part one here, you know where we left off. In this part of the tutorial, we will be implementing onCreate in detail, including button clicks and listeners, which are a crucial part of playback control.
Let's start by implementing add/remove listeners to control the listener attachment. Then we'll implement a small part of the onCreate method.
Code:
public void addListener(HwAudioStatusListener listener) {
if (mHwAudioManager != null) {
try {
mHwAudioManager.addPlayerStatusListener(listener);
} catch (RemoteException e) {
Log.e("TAG", "TAG", e);
}
} else {
mTempListeners.add(listener);
}
}
public void removeListener(HwAudioStatusListener listener) { //will be called in onDestroy() method
if (mHwAudioManager != null) {
try {
mHwAudioManager.removePlayerStatusListener(listener);
} catch (RemoteException e) {
Log.e("TAG", "TAG", e);
}
}
mTempListeners.remove(listener);
}
@Override
protected void onDestroy() {
if(mHwAudioPlayerManager!= null && isReallyPlaying){
isReallyPlaying = false;
mHwAudioPlayerManager.stop();
removeListener(mPlayListener);
}
super.onDestroy();
}
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
binding = ActivityMainBinding.inflate(getLayoutInflater());
View view = binding.getRoot();
setContentView(view);
//I set the MainActivity cover image as a placeholder image to cover for those audio files which do not have a cover image.
binding.albumPictureImageView.setImageDrawable(getDrawable(R.drawable.ic_launcher_foreground));
initializeManagerAndGetPlayList(this); //I call my method to set my playlist
addListener(mPlayListener); //I add my listeners
}
If you followed the first part closely, you already have the mTempListeners variable. So, in onCreate, we first initialize everything and then attach our listener to it.
Now that our playlist is programmatically ready, if you try to run your code, you will not be able to control what you want, because we have not touched our views yet.
Let’s implement our basic buttons first.
Code:
final Drawable drawablePlay = getDrawable(R.drawable.btn_playback_play_normal);
final Drawable drawablePause = getDrawable(R.drawable.btn_playback_pause_normal);
binding.playButtonImageView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
if(binding.playButtonImageView.getDrawable().getConstantState().equals(drawablePlay.getConstantState())){
if (mHwAudioPlayerManager != null){
mHwAudioPlayerManager.play();
binding.playButtonImageView.setImageDrawable(getDrawable(R.drawable.btn_playback_pause_normal));
isReallyPlaying = true;
}
}
else if(binding.playButtonImageView.getDrawable().getConstantState().equals(drawablePause.getConstantState())){
if (mHwAudioPlayerManager != null) {
mHwAudioPlayerManager.pause();
binding.playButtonImageView.setImageDrawable(getDrawable(R.drawable.btn_playback_play_normal));
isReallyPlaying = false;
}
}
}
});
binding.nextSongImageView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
if (mHwAudioPlayerManager != null) {
mHwAudioPlayerManager.playNext();
isReallyPlaying = true;
}
}
});
binding.previousSongImageView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
if (mHwAudioPlayerManager != null) {
mHwAudioPlayerManager.playPre();
isReallyPlaying = true;
}
}
});
Do these in your onCreate(…) method. isReallyPlaying is a global boolean variable that I created to keep track of playback. I update it every time the playback changes. You do not have to have it, as I also tested it with online playlists. I still keep it there in case you also want to test your app with online playlists, as in the sample apps provided by Huawei.
For the button visual changes, I devised a method as shown above, and I am not asserting that it is the best. You can use your own method, but the onClicks should roughly look like this.
Now we should implement our listener so that we can track changes in playback. It will also include updating the UI elements, so that the app tracks audio file changes and seekbar updates. Put this listener outside the onCreate(…) method; it will be called anytime it is required. What we should do is implement the necessary methods to respond to this calling.
Code:
HwAudioStatusListener mPlayListener = new HwAudioStatusListener() {
@Override
public void onSongChange(HwAudioPlayItem hwAudioPlayItem) {
setSongDetails(hwAudioPlayItem);
if(mHwAudioPlayerManager.getOffsetTime() != -1 && mHwAudioPlayerManager.getDuration() != -1)
updateSeekBar(mHwAudioPlayerManager.getOffsetTime(), mHwAudioPlayerManager.getDuration());
}
@Override
public void onQueueChanged(List list) {
if (mHwAudioPlayerManager != null && list.size() != 0 && !isReallyPlaying) {
mHwAudioPlayerManager.play();
isReallyPlaying = true;
binding.playButtonImageView.setImageDrawable(getDrawable(R.drawable.btn_playback_pause_normal));
}
}
@Override
public void onBufferProgress(int percent) {
}
@Override
public void onPlayProgress(final long currentPosition, long duration) {
updateSeekBar(currentPosition, duration);
}
@Override
public void onPlayCompleted(boolean isStopped) {
if (mHwAudioPlayerManager != null && isStopped) {
mHwAudioPlayerManager.playNext();
}
isReallyPlaying = !isStopped;
}
@Override
public void onPlayError(int errorCode, boolean isUserForcePlay) {
Toast.makeText(MainActivity.this, "We cannot play this!!", Toast.LENGTH_LONG).show();
}
@Override
public void onPlayStateChange(boolean isPlaying, boolean isBuffering) {
if(isPlaying || isBuffering){
binding.playButtonImageView.setImageDrawable(getDrawable(R.drawable.btn_playback_pause_normal));
isReallyPlaying = true;
}
else{
binding.playButtonImageView.setImageDrawable(getDrawable(R.drawable.btn_playback_play_normal));
isReallyPlaying = false;
}
}
};
Now let's understand the snippet above. The status listener instance wants us to implement the methods shown. They are not all required, but they will make our lives easier. I will explain what goes inside each method below.
Understanding HwAudioStatusListener
onSongChange is called upon a song change in the queue, i.e. whenever you skip a song, for example. It is also called the first time you set the playlist, because technically your song changed: from nothing to the audio file at index 0.
onQueueChanged is called when the queue is altered. I implemented some code just in case, but since we will not be dealing with adding/removing items to the playlist and changing queues, it is not very important.
onBufferProgress returns the progress as a percentage when you are using online audio.
onPlayProgress is one of the most important methods here; it is called every time the playback position changes. That is, even when the audio file proceeds by one second (i.e. plays), it will be called. So, in my opinion, it is wise to do UI updates here, rather than dealing with heavy-load fragments.
onPlayCompleted lets you decide the behaviour when the playlist finishes playing. You can restart the playlist, just stop, or do something else (like notifying the user that the playlist has ended, etc.). It is totally up to you. I restart the playlist in the sample code.
onPlayError is called whenever an error occurs in the playback. It could, for example, be that the buffered song is not loaded correctly, the format is not supported, the audio file is corrupted, etc. You can handle the result here, or you can just notify the user that the file cannot be played, just like I did.
And finally, onPlayStateChange is called whenever the music is paused/resumed. Thus, you can handle the changes here in general. I update the isReallyPlaying variable and play/pause button views inside and outside of this method just to be sure. You can update it just here if you want. It should work in theory.
Extra Methods
This listener contains two extra custom methods that I wrote to update the UI and set/render the details of the song on the screen. They are called setSongDetails(…) and updateSeekBar(…). Let’s see how they are implemented.
Code:
public void updateSeekBar(final long currentPosition, long duration){
//seekbar
binding.musicSeekBar.setMax((int) (duration / 1000));
if(mHwAudioPlayerManager != null){
int mCurrentPosition = (int) (currentPosition / 1000);
binding.musicSeekBar.setProgress(mCurrentPosition);
setProgressText(mCurrentPosition);
}
binding.musicSeekBar.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() {
@Override
public void onStopTrackingTouch(SeekBar seekBar) {
//Log.i("ONSTOPTRACK", "STOP TRACK TRIGGERED.");
}
@Override
public void onStartTrackingTouch(SeekBar seekBar) {
//Log.i("ONSTARTTRACK", "START TRACK TRIGGERED.");
}
@Override
public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
if(mHwAudioPlayerManager != null && fromUser){
mHwAudioPlayerManager.seekTo(progress*1000);
}
if(!isReallyPlaying){
setProgressText(progress); //when the song is not playing and user updates the seekbar, seekbar still should be updated
}
}
});
}
public void setProgressText(int progress){
String progressText = String.format(Locale.US, "%02d:%02d",
TimeUnit.MILLISECONDS.toMinutes(progress*1000),
TimeUnit.MILLISECONDS.toSeconds(progress*1000) -
TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(progress*1000))
);
binding.progressTextView.setText(progressText);
}
public void setSongDetails(HwAudioPlayItem currentItem){
if(currentItem != null){
getBitmapOfCover(currentItem);
if(!currentItem.getAudioTitle().equals(""))
binding.songNameTextView.setText(currentItem.getAudioTitle());
else{
binding.songNameTextView.setText("Try choosing a song");
}
if(!currentItem.getSinger().equals(""))
binding.artistNameTextView.setText(currentItem.getSinger());
else
binding.artistNameTextView.setText("From the playlist");
binding.albumNameTextView.setText("Album Unknown"); //there is no field assigned to this in HwAudioPlayItem
binding.progressTextView.setText("00:00"); //initial progress of every song
long durationTotal = currentItem.getDuration();
String totalDurationText = String.format(Locale.US, "%02d:%02d",
TimeUnit.MILLISECONDS.toMinutes(durationTotal),
TimeUnit.MILLISECONDS.toSeconds(durationTotal) -
TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(durationTotal))
);
binding.totalDurationTextView.setText(totalDurationText);
}
else{
//This is a measure to prevent a bad first opening of the app. After the user
//selects a song from the playlist, this branch should never execute again.
binding.songNameTextView.setText("Try choosing a song");
binding.artistNameTextView.setText("From the playlist");
binding.albumNameTextView.setText("It is at right-top of the screen");
}
}
Remarks
I format the time every time I receive it, because AudioKit currently sends it as milliseconds and the user needs to see it in 00:00 format.
I divide by 1000, and multiply by 1000 when seeking to a progress value, to keep the changes visible on screen. It is also the more convenient and customary way of implementing seekbars.
I take precautions with if checks in case values return null. It should not happen except on the first opening, but still, as developers, we should be careful.
Please follow comments in the code for additional information.
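getBitmapOfCover(…) is called in setSongDetails(…) above but not shown in this part. A minimal sketch using Android's MediaMetadataRetriever, assuming the play item exposes a local file path (the getFilePath() accessor is an assumption; check your AudioKit version):
Code:
public void getBitmapOfCover(HwAudioPlayItem currentItem) {
    MediaMetadataRetriever retriever = new MediaMetadataRetriever();
    try {
        retriever.setDataSource(currentItem.getFilePath()); // assumed accessor
        byte[] art = retriever.getEmbeddedPicture();
        if (art != null) {
            binding.albumPictureImageView.setImageBitmap(BitmapFactory.decodeByteArray(art, 0, art.length));
        } else {
            // Fall back to the placeholder set in onCreate.
            binding.albumPictureImageView.setImageDrawable(getDrawable(R.drawable.ic_launcher_foreground));
        }
        retriever.release();
    } catch (Exception e) {
        Log.e("TAG", "Could not read embedded cover art", e);
    }
}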
That's it for this part. However, we are not done yet. As I mentioned earlier, we should also implement the playlist to choose songs from and implement advanced playback controls in part 3. See you there!
sujith.e said:
Well explained, do we need login?
Yeah, you need to log in

Faster and safer internet with hQUIC: Part 1

Overview
QUIC (Quick UDP Internet Connections) is a general-purpose transport layer network protocol.
QUIC is a new transport which reduces latency compared to TCP. On the surface, QUIC is very similar to TCP+TLS+HTTP/2 implemented on UDP. Because TCP is implemented in operating system kernels and middlebox firmware, making significant changes to TCP is next to impossible.
QUIC reduces round trips by working over UDP. With great features like eliminating head-of-line blocking, loss recovery over UDP using multiplexed connections, and congestion control, the protocol has evolved and is being widely used.
Introduction of hQUIC
hQUIC fulfils our needs for lower packet loss in our applications, high performance in our network operations, and reliable, fast connections. It supports the gQUIC protocol and provides intelligent congestion control algorithms to avoid congestion in different network environments.
Advantages
Ease of use: Streamlines APIs and shields low-level network details.
High compatibility: Supports gQUIC and Cronet.
Better experience: Outperforms other protocols in poor network conditions
{
"lightbox_close": "Close",
"lightbox_next": "Next",
"lightbox_previous": "Previous",
"lightbox_error": "The requested content cannot be loaded. Please try again later.",
"lightbox_start_slideshow": "Start slideshow",
"lightbox_stop_slideshow": "Stop slideshow",
"lightbox_full_screen": "Full screen",
"lightbox_thumbnails": "Thumbnails",
"lightbox_download": "Download",
"lightbox_share": "Share",
"lightbox_zoom": "Zoom",
"lightbox_new_window": "New window",
"lightbox_toggle_sidebar": "Toggle sidebar"
}
What is Cronet?
Cronet is the networking stack of Chromium put into a library for use on mobile. This is the same networking stack that is used in the Chrome browser by over a billion people. It offers an easy-to-use, high performance, standards-compliant, and secure way to perform HTTP requests.
Cronet is a very well implemented and tested network stack that provides almost everything a standard app needs from a network layer library: DNS, cookies, SSL/TLS, HTTP(S), HTTP/2, Proxy/PAC, OCSP, WebSockets, and F̶T̶P̶ (soon to be removed). It also supports HTTP over QUIC, soon to be called HTTP/3. This makes it a very attractive library to use as the network layer for your mobile app.
How TCP Works
TCP allows for transmission of information in both directions. This means that computer systems that communicate over TCP can send and receive data at the same time, similar to a telephone conversation. The protocol uses segments (packets) as the basic units of data transmission.
The actual process for establishing a connection with the TCP protocol is as follows:
1. First, the requesting sender sends the server a SYN packet or segment (SYN stands for synchronize) with a unique, random number. This number ensures full transmission in the correct order (without duplicates).
2. If the receiver has received the segment, it agrees to the connection by returning a SYN-ACK packet (ACK stands for acknowledgement) including the client's sequence number plus 1. It also transmits its own sequence number to the client.
3. Finally, the sender acknowledges the receipt of the SYN-ACK segment by sending its own ACK packet, which in this case contains the receiver's sequence number plus 1. At the same time, the client can already begin transferring data to the receiver.
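In Java, for example, the entire three-way handshake happens inside a single blocking connect call. A minimal sketch (host, port, and timeout are illustrative):
Code:
import java.net.InetSocketAddress;
import java.net.Socket;

public class TcpHandshakeDemo {
    public static void main(String[] args) throws Exception {
        try (Socket socket = new Socket()) {
            // SYN, SYN-ACK, and ACK are exchanged by the OS kernel inside connect().
            socket.connect(new InetSocketAddress("example.com", 80), 5000);
            System.out.println("Connected: " + socket.isConnected());
        }
    }
}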
Prerequisite
1. A computer with Android Studio installed and able to access the Internet
2. A Huawei phone with HMS Core (APK) 5.0.0 or later installed for debugging the app
3. Java JDK (1.7 or later)
4. Android API (level 19 or higher)
5. EMUI 3.0 or later
6. HMS Core (APK) 5.0.0 or later
Integration process
1. Open the build.gradle file in the root directory of your Android Studio project.
Code:
buildscript {
    repositories {
        maven { url 'https://developer.huawei.com/repo/' }
        google()
        ...
    }
    dependencies {
        classpath 'com.android.tools.build:gradle:3.3.2'
    }
}
2. Open the build.gradle file in the app directory and add dependency on the SDK.
Code:
dependencies {
implementation 'com.huawei.hms:hquic-provider:5.0.0.300'
}
3. Initialise the service
Code:
HQUICManager.asyncInit(context, new HQUICManager.HQUICInitCallback() {
@Override
public void onSuccess() {
Log.i(TAG, "HQUICManager asyncInit success");
}
@Override
public void onFail(Exception e) {
Log.w(TAG, "HQUICManager asyncInit fail");
}
});
4. Initialise the Cronet Engine
Code:
CronetEngine.Builder builder = new CronetEngine.Builder(context);
builder.enableQuic(true);
builder.addQuicHint(getHost(url), DEFAULT_PORT, DEFAULT_ALTERNATEPORT);
cronetEngine = builder.build();
App Development
I will use APIs of the hQUIC SDK by developing a Speed Test app step by step.
I have created the HQUICServiceProvider class, in which I will create the service using the hQUIC APIs.
Let us implement the asynchronous initialization API.
Code:
public class HQUICServiceProvider {
private static final String TAG = "HQUICServiceProvider";
private static final int DEFAULT_PORT = 443;
private static final int DEFAULT_ALTERNATEPORT = 443;
private static Executor executor = Executors.newSingleThreadExecutor();
private static CronetEngine cronetEngine;
private Context context;
private UrlRequest.Callback callback;
public HQUICServiceProvider(Context context) {
this.context = context;
init();
}
public void init() {
HQUICManager.asyncInit(
context,
new HQUICManager.HQUICInitCallback() {
@Override
public void onSuccess() {
Log.i(TAG, "HQUICManager asyncInit success");
}
@Override
public void onFail(Exception e) {
Log.w(TAG, "HQUICManager asyncInit fail");
}
});
}
private CronetEngine createCronetEngine(String url) {
if (cronetEngine != null) {
return cronetEngine;
}
CronetEngine.Builder builder = new CronetEngine.Builder(context);
builder.enableQuic(true);
builder.addQuicHint(getHost(url), DEFAULT_PORT, DEFAULT_ALTERNATEPORT);
cronetEngine = builder.build();
return cronetEngine;
}
private UrlRequest buildRequest(String url, String method) {
CronetEngine cronetEngine = createCronetEngine(url);
UrlRequest.Builder requestBuilder =
cronetEngine.newUrlRequestBuilder(url, callback, executor).setHttpMethod(method);
UrlRequest request = requestBuilder.build();
return request;
}
public void sendRequest(String url, String method) {
Log.i(TAG, "callURL: url is " + url + "and method is " + method);
UrlRequest urlRequest = buildRequest(url, method);
if (null != urlRequest) {
urlRequest.start();
}
}
private String getHost(String url) {
String host = null;
try {
java.net.URL url1 = new java.net.URL(url);
host = url1.getHost();
} catch (MalformedURLException e) {
Log.e(TAG, "getHost: ", e);
}
return host;
}
public void setCallback(UrlRequest.Callback mCallback) {
callback = mCallback;
}
}
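To issue a request with this provider, a UrlRequest.Callback must be supplied. A minimal sketch of wiring one up (the logging tag and URL are illustrative; ByteBuffer comes from java.nio):
Code:
// Minimal Cronet callback: follow redirects, drain the body, log completion.
UrlRequest.Callback callback = new UrlRequest.Callback() {
    @Override
    public void onRedirectReceived(UrlRequest request, UrlResponseInfo info, String newLocationUrl) {
        request.followRedirect();
    }
    @Override
    public void onResponseStarted(UrlRequest request, UrlResponseInfo info) {
        // Start draining the response body.
        request.read(ByteBuffer.allocateDirect(32 * 1024));
    }
    @Override
    public void onReadCompleted(UrlRequest request, UrlResponseInfo info, ByteBuffer byteBuffer) {
        byteBuffer.clear();
        request.read(byteBuffer);
    }
    @Override
    public void onSucceeded(UrlRequest request, UrlResponseInfo info) {
        Log.i("SpeedTest", "Finished, negotiated protocol: " + info.getNegotiatedProtocol());
    }
    @Override
    public void onFailed(UrlRequest request, UrlResponseInfo info, CronetException error) {
        Log.e("SpeedTest", "Request failed", error);
    }
};

HQUICServiceProvider provider = new HQUICServiceProvider(context);
provider.setCallback(callback);
provider.sendRequest("https://www.example.com/", "GET");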
For more details, you can visit https://forums.developer.huawei.com/forumPortal/en/topic/0203400466275620106
Will it support only network call related information?

【ML】Free Translation — A Real-Time, Ad-Free Translation App

Preface
Free Translation is a real-time translation app that provides a range of services, including speech recognition, text translation, and text-to-speech (TTS).
Developing an AI app like Free Translation tends to require complex machine learning know-how, but integrating ML Kit makes the development quick and easy.
Use Scenarios
Free Translation is equipped to handle a wide range of user needs, for example, translating content at work, assisting during travel in a foreign country, communicating with a foreign friend, or learning a new language.
Development Preparations
1. Configure the Huawei Maven repository address.
2. Add build dependencies for the ML SDK.
Open the build.gradle file in the app directory of your project.
Code:
dependencies {
// Import the automatic speech recognition (ASR) plug-in.
implementation 'com.huawei.hms:ml-computer-voice-asr-plugin:2.0.3.300'
// Import the text translation SDK.
implementation 'com.huawei.hms:ml-computer-translate:2.0.4.300'
// Import the text translation algorithm package.
implementation 'com.huawei.hms:ml-computer-translate-model:2.0.4.300'
// Import the TTS SDK.
implementation 'com.huawei.hms:ml-computer-voice-tts:2.0.4.300'
// Import the bee voice package of on-device TTS.
implementation 'com.huawei.hms:ml-computer-voice-tts-model-bee:2.0.4.300'
}
For more details, please refer to Preparations.
Open the AndroidManifest.xml file in the main directory, and add the relevant permissions above the <application/> line.
Code:
<uses-permission android:name="android.permission.INTERNET" /> <!-- Accessing the Internet. -->
<uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" /> <!-- Obtaining the network status. -->
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" /><!-- Upgrading the algorithm version. -->
<uses-permission android:name="android.permission.ACCESS_WIFI_STATE" /><!-- Obtaining the Wi-Fi status. -->
<uses-permission android:name="android.permission.RECORD_AUDIO" /><!-- Recording audio data by using the recorder. -->
Development Procedure
UI Design
Customize the app UI according to your needs, based on the layout file activity_main.xml.
Tap on START RECOGNITION to load the ASR module, which recognizes what the user says.
Tap on SYNTHETIC VOICE to load the TTS module, which reads out the resulting translation.
Function Development
You can use the ASR plug-in to quickly integrate the ASR service.
Code:
public void startAsr(View view) {
// Use Intent for recognition settings.
Intent intent = new Intent(this, MLAsrCaptureActivity.class)
// Set the language that can be recognized to English. If this parameter is not set, English is recognized by default. Languages supported include the following: "zh-CN": Chinese; "en-US": English; "fr-FR": French; "de-DE": German; "it-IT": Italian.
.putExtra(MLAsrCaptureConstants.LANGUAGE, Constants.ASR_SOURCE[spinnerInput.getSelectedItemPosition()])
// Set whether to display the recognition result on the speech pickup UI. MLAsrCaptureConstants.FEATURE_ALLINONE: no; MLAsrCaptureConstants.FEATURE_WORDFLUX: yes.
.putExtra(MLAsrCaptureConstants.FEATURE, MLAsrCaptureConstants.FEATURE_WORDFLUX);
// 100: request code between the current activity and speech pickup UI activity. You can use this code to obtain the processing result of the speech pickup UI.
startActivityForResult(intent, 100);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
String text;
// 100: request code between the current activity and speech pickup UI activity, which is defined above.
if (requestCode == 100) {
switch (resultCode) {
// MLAsrCaptureConstants.ASR_SUCCESS: Recognition is successful.
case MLAsrCaptureConstants.ASR_SUCCESS:
if (data != null) {
Bundle bundle = data.getExtras();
// Obtain the text information recognized from speech.
if (bundle != null && bundle.containsKey(MLAsrCaptureConstants.ASR_RESULT)) {
text = bundle.getString(MLAsrCaptureConstants.ASR_RESULT);
// Process the recognized text information.
textViewInput.setText(text);
Translation.run(this, textViewOutput, spinnerInput.getSelectedItemPosition(),
spinnerOutput.getSelectedItemPosition(), text);
}
}
break;
}
}
}
Create the Translation class to use the text translation service.
Step 1 Define the public method, which decides whether to use real-time or on-device translation.
Code:
public static void run(Activity activity, TextView textView, int sourcePosition, int targetPosition, String sourceText) {
Log.d(TAG, Constants.TRANSLATE[sourcePosition] + ", " + Constants.TRANSLATE[targetPosition] + ", " + sourceText);
if (isOffline) {
onDeviceTranslation(activity, textView, sourcePosition, targetPosition, sourceText);
} else {
realTimeTranslation(textView, sourcePosition, targetPosition, sourceText);
}
}
Step 2 Call the real-time or on-device translation method.
Code:
private static void realTimeTranslation(final TextView textView, int sourcePosition, final int targetPosition, String sourceText) {
Log.d(TAG, "realTimeTranslation");
}
private static void onDeviceTranslation(final Activity activity, final TextView textView, final int sourcePosition, final int targetPosition, final String sourceText) {
Set<String> result = MLTranslateLanguage.syncGetLocalAllLanguages();
Log.d(TAG, "Languages supported by on-device translation: " +Arrays.toString(result.toArray()));
}
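The two methods above are intentionally shown as stubs. A hedged sketch of what realTimeTranslation could look like with the ML Kit remote translator (the language-code lookup via the Constants arrays is assumed from the surrounding code):
Code:
private static void realTimeTranslation(final TextView textView, int sourcePosition, final int targetPosition, String sourceText) {
    // Configure source/target languages for the cloud translator.
    MLRemoteTranslateSetting setting = new MLRemoteTranslateSetting.Factory()
        .setSourceLangCode(Constants.TRANSLATE[sourcePosition])
        .setTargetLangCode(Constants.TRANSLATE[targetPosition])
        .create();
    MLRemoteTranslator translator = MLTranslatorFactory.getInstance().getRemoteTranslator(setting);
    translator.asyncTranslate(sourceText)
        .addOnSuccessListener(new OnSuccessListener<String>() {
            @Override
            public void onSuccess(String translated) {
                // Render the translation result.
                textView.setText(translated);
            }
        })
        .addOnFailureListener(new OnFailureListener() {
            @Override
            public void onFailure(Exception e) {
                Log.e(TAG, "Translation failed", e);
            }
        });
}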
Create the TTS class to use the text-to-speech service.
Step 1 Just as with Step 1 in Translation, define the public method, which decides whether to use real-time or on-device TTS.
Code:
public static void run(Activity activity, int targetPosition, String sourceText) {
Log.d(TAG, sourceText);
if (isNotAuto || sourceText.isEmpty()) {
return;
}
if (isOffline) {
if (0 == targetPosition) {
// The message string resource was omitted in the original post; placeholder text is used here.
Toast.makeText(activity, "On-device TTS is unavailable for this language",
Toast.LENGTH_SHORT).show();
return;
}
offlineTts(activity, Constants.TTS_TARGET[targetPosition],
Constants.TTS_TARGET_SPEAKER_OFFLINE[targetPosition], sourceText);
} else {
onlineTts(Constants.TTS_TARGET[targetPosition], Constants.TTS_TARGET_SPEAKER[targetPosition], sourceText);
}
}
Step 2 Call the real-time or on-device TTS method.
Code:
private static void onlineTts(String language, String person, String sourceText) {
Log.d(TAG, language + ", " + person + ", " + sourceText);
}
private static void offlineTts(final Activity activity, String language, final String person, final String sourceText) {
// Use customized parameter settings to create a TTS engine.
// For details about the speaker names, please refer to the Timbres section.
final MLTtsConfig mlTtsConfig = new MLTtsConfig().setLanguage(language)
.setPerson(person)
// Set the TTS mode to on-device mode. The default mode is real-time mode.
.setSynthesizeMode(MLTtsConstants.TTS_OFFLINE_MODE);
}
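The stub above only builds the config. A hedged sketch of completing the synthesis with an MLTtsEngine (the same pattern applies to onlineTts with a real-time config):
Code:
// Create the engine from the config and queue the text for synthesis.
MLTtsEngine mlTtsEngine = new MLTtsEngine(mlTtsConfig);
mlTtsEngine.speak(sourceText, MLTtsEngine.QUEUE_APPEND);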
Final Effects
More Information
To join in on developer discussion forums, go to Reddit.
To download the demo app and sample code, go to GitHub.
For solutions to integration-related issues, go to Stack Overflow.
More details
