-
Notifications
You must be signed in to change notification settings - Fork 24
Expand file tree
/
Copy pathAudioRecognitionUsage.java
More file actions
162 lines (151 loc) · 6.3 KB
/
AudioRecognitionUsage.java
File metadata and controls
162 lines (151 loc) · 6.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
// Copyright (c) Alibaba, Inc. and its affiliates.
import java.io.File;
import java.nio.ByteBuffer;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;
import com.alibaba.dashscope.audio.asr.recognition.Recognition;
import com.alibaba.dashscope.audio.asr.recognition.RecognitionParam;
import com.alibaba.dashscope.audio.asr.recognition.RecognitionResult;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import io.reactivex.BackpressureStrategy;
import io.reactivex.Flowable;
import io.reactivex.schedulers.Schedulers;
/**
 * Sample showing DashScope real-time speech recognition fed from two PCM sources:
 * a local audio file and the system microphone, each exposed as a reactive
 * {@code Flowable<ByteBuffer>} of audio chunks.
 */
public final class AudioRecognitionUsage {
  /** Capture format shared by the microphone line and the recognizer: 16 kHz, 16-bit, mono PCM. */
  private final AudioFormat format = buildAudioFormatInstance();

  /** Set when recognition fails so the microphone capture loop terminates. */
  private volatile boolean stopped;

  /**
   * Opens a microphone line that matches {@link #format}.
   *
   * @return an opened (but not yet started) {@link TargetDataLine}
   * @throws LineUnavailableException if no matching line can be opened
   */
  public TargetDataLine getTargetDataLineForRecord() throws LineUnavailableException {
    TargetDataLine microphone = AudioSystem.getTargetDataLine(format);
    System.out.println(microphone.getBufferSize());
    // Open with the explicit capture format. A no-arg open() uses the line's
    // default format, which may not match the 16 kHz/16-bit/mono PCM that the
    // recognition parameters below declare.
    microphone.open(format);
    return microphone;
  }

  /**
   * Streams microphone audio as chunks of up to 1024 frames. Capture stops after
   * 100 chunks, or earlier if {@link #stopped} is set or the line reports EOF.
   *
   * @return a cold Flowable that opens the microphone when subscribed
   */
  public Flowable<ByteBuffer> getMicrophoneStreaming() {
    return Flowable.<ByteBuffer>create(
        emitter -> {
          TargetDataLine line = null;
          try {
            line = getTargetDataLineForRecord();
            int frameSizeInBytes = format.getFrameSize();
            System.out.println(
                String.format(
                    "Frame size in bytes: %s, %s", frameSizeInBytes, format.getEncoding()));
            final int bufferLengthInBytes = 1024 * frameSizeInBytes;
            int numBytesRead;
            line.start();
            int c = 0;
            byte[] data = new byte[bufferLengthInBytes];
            while (!stopped && c < 100) {
              if ((numBytesRead = line.read(data, 0, bufferLengthInBytes)) == -1) {
                break;
              } else {
                System.out.println(String.format("Read microphone %s data", numBytesRead));
                // Emit a private copy: BackpressureStrategy.BUFFER queues chunks for an
                // asynchronous consumer, and `data` is overwritten on the next read.
                byte[] chunk = new byte[numBytesRead];
                System.arraycopy(data, 0, chunk, 0, numBytesRead);
                emitter.onNext(ByteBuffer.wrap(chunk));
              }
              if (c % 10 == 0) {
                System.out.println(String.format("Sending %d packages", c));
              }
              ++c;
            }
            emitter.onComplete();
          } catch (Exception ex) {
            ex.printStackTrace();
            emitter.onError(ex);
          } finally {
            // Always release the audio device; the line would otherwise stay open
            // (and keep the microphone busy) after completion or failure.
            if (line != null) {
              line.stop();
              line.close();
            }
          }
        },
        BackpressureStrategy.BUFFER);
  }

  /**
   * Builds the shared capture format: signed little-endian PCM, 16 kHz, 16-bit, mono.
   *
   * @return the audio format used for both capture and recognition
   */
  public AudioFormat buildAudioFormatInstance() {
    final AudioFormat.Encoding ENCODING = AudioFormat.Encoding.PCM_SIGNED;
    final float RATE = 16000.0f;
    final int CHANNELS = 1;
    final int SAMPLE_SIZE = 16;
    final boolean BIG_ENDIAN = false;
    // Derive the frame size from the constants above instead of hard-coding 2,
    // so the values cannot drift apart if one is ever changed.
    final int FRAME_SIZE = (SAMPLE_SIZE / 8) * CHANNELS;
    return new AudioFormat(ENCODING, RATE, SAMPLE_SIZE, CHANNELS, FRAME_SIZE, RATE, BIG_ENDIAN);
  }

  /**
   * Streams an audio file as chunks of up to 1024 frames on the I/O scheduler.
   *
   * @param filePath path to an audio file readable by {@link AudioSystem}
   * @return a cold Flowable that opens the file when subscribed
   */
  public Flowable<ByteBuffer> getStreamingDataFromFile(String filePath) {
    return Flowable.<ByteBuffer>create(
            emitter -> {
              int totalFramesRead = 0;
              File fileIn = new File(filePath);
              // try-with-resources closes the stream on every path; the original
              // implementation leaked it.
              try (AudioInputStream audioInputStream = AudioSystem.getAudioInputStream(fileIn)) {
                int bytesPerFrame = audioInputStream.getFormat().getFrameSize();
                System.out.println(String.format("BytesPerFrame: %s", bytesPerFrame));
                // Arbitrary buffer size of 1024 frames per emitted chunk.
                int numBytes = 1024 * bytesPerFrame;
                byte[] audioBytes = new byte[numBytes];
                int numBytesRead;
                while ((numBytesRead = audioInputStream.read(audioBytes)) != -1) {
                  totalFramesRead += numBytesRead / bytesPerFrame;
                  // Emit a private copy: `audioBytes` is reused on the next read while
                  // the buffered consumer may still hold the previous chunk.
                  byte[] chunk = new byte[numBytesRead];
                  System.arraycopy(audioBytes, 0, chunk, 0, numBytesRead);
                  emitter.onNext(ByteBuffer.wrap(chunk));
                }
                emitter.onComplete();
                System.out.println(String.format("Total frames: %d", totalFramesRead));
              } catch (Exception e) {
                emitter.onError(e);
              }
            },
            BackpressureStrategy.BUFFER)
        .subscribeOn(Schedulers.io());
  }

  /**
   * Runs recognition over an audio file and prints each intermediate result.
   *
   * @param filePath path to the audio file to recognize
   * @throws ApiException if the DashScope service reports an error
   * @throws NoApiKeyException if no API key is configured
   */
  public void recognitionFile(String filePath) throws ApiException, NoApiKeyException {
    RecognitionParam param =
        RecognitionParam.builder()
            .format("pcm")
            .model("paraformer-realtime-v1")
            .sampleRate(16000)
            .build();
    Recognition rg = new Recognition();
    Flowable<RecognitionResult> resultFlowable =
        rg.streamCall(param, getStreamingDataFromFile(filePath));
    resultFlowable.blockingForEach(
        message -> {
          System.out.println(message);
        });
  }

  /**
   * Runs recognition over live microphone audio and prints each result.
   * On error, {@link #stopped} is set so the capture loop exits.
   *
   * @throws ApiException if the DashScope service reports an error
   * @throws NoApiKeyException if no API key is configured
   */
  public void recognitionRealtimeMicrophone() throws ApiException, NoApiKeyException {
    RecognitionParam param =
        RecognitionParam.builder()
            .format("pcm")
            .model("paraformer-realtime-v1")
            .sampleRate(16000)
            .build();
    Recognition rg = new Recognition();
    Flowable<RecognitionResult> resultFlowable = rg.streamCall(param, getMicrophoneStreaming());
    resultFlowable
        .doOnError(
            err -> {
              // Signal the microphone loop to stop so the capture thread terminates.
              stopped = true;
              System.out.println(err);
            })
        .blockingForEach(
            message -> {
              System.out.println(message);
            });
  }

  /** Entry point: recognizes a bundled sample file, then live microphone input. */
  public static void main(String[] args) {
    String filePath = "./src/test/resources/asr_example_cn_en.wav";
    AudioRecognitionUsage audioRecognition = new AudioRecognitionUsage();
    try {
      audioRecognition.recognitionFile(filePath);
      audioRecognition.recognitionRealtimeMicrophone();
    } catch (Exception e) {
      // Print the full trace, not just getMessage(): sample code should show
      // the real failure cause (missing API key, no microphone, bad path, ...).
      e.printStackTrace();
    }
    // RxJava/network threads may still be alive; exit explicitly.
    System.exit(0);
  }
}