Java android.media.AudioRecord example source code

Project: KaldiAndroid    File: RawAudioRecorder.java
/**
 * <p>Starts the recording, and sets the state to RECORDING.</p>
 */
public void start() {
    if (mRecorder.getState() == AudioRecord.STATE_INITIALIZED) {
        mRecorder.startRecording();
        if (mRecorder.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
            setState(State.RECORDING);
            new Thread() {
                public void run() {
                    while (mRecorder != null && mRecorder.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
                        int status = read(mRecorder);
                        if (status < 0) {
                            break;
                        }
                    }
                }
            }.start();
        } else {
            Log.e(LOG_TAG, "startRecording() failed");
            setState(State.ERROR);
        }
    } else {
        Log.e(LOG_TAG, "start() called on illegal state");
        setState(State.ERROR);
    }
}
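RawAudioRecorder's mRecorder field and its read(AudioRecord) helper are defined elsewhere in the class. A minimal sketch of how such a recorder is typically created; the 16 kHz mono 16-bit PCM parameters and the method name are assumptions, not taken from the KaldiAndroid sources:

private AudioRecord createRecorder() {
    // Hypothetical setup; 16 kHz mono 16-bit PCM is assumed, not taken from KaldiAndroid.
    final int sampleRate = 16000;
    int minSize = AudioRecord.getMinBufferSize(sampleRate,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    if (minSize == AudioRecord.ERROR_BAD_VALUE) {
        return null; // configuration not supported on this device
    }
    // Oversize the internal buffer so the read loop above is less likely to overrun.
    AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
            sampleRate, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, minSize * 4);
    if (recorder.getState() != AudioRecord.STATE_INITIALIZED) {
        recorder.release(); // e.g. RECORD_AUDIO permission missing
        return null;
    }
    return recorder;
}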
Project: XPermission    File: AudioRecordManager.java
@Override
public void run() {
    try {
        android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
        int bytesRecord;
        byte[] tempBuffer = new byte[bufferSize];
        mRecorder.startRecording();
        while (isStart) {
            if (mRecorder != null) {
                bytesRecord = mRecorder.read(tempBuffer, 0, bufferSize);
                if (bytesRecord == AudioRecord.ERROR_INVALID_OPERATION || bytesRecord ==
                        AudioRecord.ERROR_BAD_VALUE) {
                    continue;
                }
                if (bytesRecord != 0 && bytesRecord != -1) { // -1 == AudioRecord.ERROR
                    dos.write(tempBuffer, 0, bytesRecord);
                } else {
                    break;
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Project: androidthings-googleassistant    File: AssistantActivity.java
@Override
public void run() {
    ByteBuffer audioData = ByteBuffer.allocateDirect(SAMPLE_BLOCK_SIZE);
    if (mAudioInputDevice != null) {
        mAudioRecord.setPreferredDevice(mAudioInputDevice);
    }
    int result =
            mAudioRecord.read(audioData, audioData.capacity(), AudioRecord.READ_BLOCKING);
    if (result < 0) {
        Log.e(TAG, "error reading from audio stream:" + result);
        return;
    }
    Log.d(TAG, "streaming ConverseRequest: " + result);
    mAssistantRequestObserver.onNext(ConverseRequest.newBuilder()
            .setAudioIn(ByteString.copyFrom(audioData))
            .build());
    mAssistantHandler.post(mStreamAssistantRequest);
}
Project: permissions4m    File: AudioRecordManager.java
@Override
public void run() {
    try {
        android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
        int bytesRecord;
        byte[] tempBuffer = new byte[bufferSize];
        mRecorder.startRecording();
        while (isStart) {
            if (mRecorder != null) {
                bytesRecord = mRecorder.read(tempBuffer, 0, bufferSize);
                if (bytesRecord == AudioRecord.ERROR_INVALID_OPERATION || bytesRecord ==
                        AudioRecord.ERROR_BAD_VALUE) {
                    continue;
                }
                if (bytesRecord != 0 && bytesRecord != -1) { // -1 == AudioRecord.ERROR
                    dos.write(tempBuffer, 0, bytesRecord);
                } else {
                    break;
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Project: GoogleAssistantSDK    File: VoiceRecorder_.java
/**
 * Creates a new {@link AudioRecord}.
 *
 * @return A newly created {@link AudioRecord}, or null if it cannot be created (missing
 * permissions?).
 */
private AudioRecord createAudioRecord() {
    for (int sampleRate : SAMPLE_RATE_CANDIDATES) {
        final int sizeInBytes = AudioRecord.getMinBufferSize(sampleRate, CHANNEL, ENCODING);
        if (sizeInBytes == AudioRecord.ERROR_BAD_VALUE) {
            continue;
        }
        final AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
                sampleRate, CHANNEL, ENCODING, sizeInBytes);
        if (audioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
            mBuffer = new byte[sizeInBytes];
            return audioRecord;
        } else {
            audioRecord.release();
        }
    }
    return null;
}
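A possible consumer of the AudioRecord returned above; the processVoice name and the interrupt-based shutdown are illustrative assumptions, not part of VoiceRecorder_:

private void processVoice(AudioRecord audioRecord) {
    // Hypothetical consumer for the recorder created above; mBuffer was sized in createAudioRecord().
    audioRecord.startRecording();
    try {
        while (!Thread.currentThread().isInterrupted()) {
            int size = audioRecord.read(mBuffer, 0, mBuffer.length);
            if (size < 0) {
                break; // ERROR, ERROR_INVALID_OPERATION or ERROR_BAD_VALUE
            }
            // hand the PCM chunk to whatever consumes it (callback, recognizer, ...)
        }
    } finally {
        audioRecord.stop();
        audioRecord.release();
    }
}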
Project: XPermission    File: AudioRecordManager.java
/**
 * Stops recording, releases the recorder and closes the output stream.
 *
 * @throws IOException          if flushing or closing the output stream fails
 * @throws InterruptedException if the pre-stop delay is interrupted
 */
public void stopRecord() throws IOException, InterruptedException {
    // short pre-stop delay needed on some devices (OPPO, XIAOMI, MEIZU, HUAWEI, etc.)
    Thread.sleep(250);
    destroyThread();
    if (mRecorder != null) {
        if (mRecorder.getState() == AudioRecord.STATE_INITIALIZED) {
            mRecorder.stop();
        }
        if (mRecorder != null) {
            mRecorder.release();
        }
    }
    if (dos != null) {
        dos.flush();
        dos.close();
    }
    length = file.length();
    deleteFile();
}
Project: AudioGenderIdentifier    File: MainActivity.java
@Override
public void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);

    bufferSize = AudioRecord.getMinBufferSize(16000,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);

    setButtonHandlers();
    enableButtons(false);

    inferenceInterface = new TensorFlowInferenceInterface();
    inferenceInterface.initializeTensorFlow(getAssets(), MODEL_FILE);
//    tensorFlowSample();
}
Project: AudioGenderIdentifier    File: MainActivity.java
private void startRecording(){
    recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
            RECORDER_SAMPLERATE, RECORDER_CHANNELS,RECORDER_AUDIO_ENCODING, bufferSize);

    if (recorder.getState() == AudioRecord.STATE_INITIALIZED)
        recorder.startRecording();

    isRecording = true;

    recordingThread = new Thread(new Runnable() {

        @Override
        public void run() {
            writeAudioDataToArrayList();
            reorganiseDataArray(flattenArray());
        }
    },"AudioRecorder Thread");

    recordingThread.start();
}
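The matching stop path is not shown on this page; a minimal sketch, assuming the recorder, recordingThread and isRecording fields above are the only shared state:

private void stopRecording() {
    // Hypothetical counterpart to startRecording() above.
    isRecording = false; // lets writeAudioDataToArrayList() fall out of its loop
    if (recordingThread != null) {
        try {
            recordingThread.join();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        recordingThread = null;
    }
    if (recorder != null) {
        if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
            recorder.stop();
        }
        recorder.release();
        recorder = null;
    }
}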
Project: sonicky    File: Decoder.java
public byte[] listen() {
    mReceivedBytes = new byte[]{};
    mFinished = false;

    if (mAudioRec.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
        mAudioRec.startRecording();
    }

    while (true) {
        if (mFinished) break;

//        short[] audioData = readAudioData();
//        window(audioData);
//
//        int[] powerlist = new int[18];
//        for (int i = 0; i < powerlist.length; i++) {
//            powerlist[i] = goertzel(CHAR_FREQ[i]);
//        }
//        int base = goertzel(BASELINE);
//
//        updateState(powerlist, base);
//        signalToBits();
//        processByte();
    }
    return mReceivedBytes;
}
Project: permissions4m    File: AudioRecordManager.java
/**
 * Stops recording, releases the recorder and closes the output stream.
 *
 * @throws IOException          if flushing or closing the output stream fails
 * @throws InterruptedException if the pre-stop delay is interrupted
 */
public void stopRecord() throws IOException, InterruptedException {
    // short pre-stop delay needed on some devices (OPPO, XIAOMI, MEIZU, HUAWEI, etc.)
    Thread.sleep(250);
    destroyThread();
    if (mRecorder != null) {
        if (mRecorder.getState() == AudioRecord.STATE_INITIALIZED) {
            mRecorder.stop();
        }
        if (mRecorder != null) {
            mRecorder.release();
        }
    }
    if (dos != null) {
        dos.flush();
        dos.close();
    }
    length = file.length();
    deleteFile();
}
Project: KaldiAndroid    File: RawAudioRecorder.java
private int read(AudioRecord recorder) {
    int numberOfBytes = recorder.read(mBuffer, 0, mBuffer.length); // Fill buffer

    // Some error checking
    if (numberOfBytes == AudioRecord.ERROR_INVALID_OPERATION) {
        Log.e(LOG_TAG, "The AudioRecord object was not properly initialized");
        return -1;
    } else if (numberOfBytes == AudioRecord.ERROR_BAD_VALUE) {
        Log.e(LOG_TAG, "The parameters do not resolve to valid data and indexes.");
        return -2;
    } else if (numberOfBytes > mBuffer.length) {
        Log.e(LOG_TAG, "Read more bytes than is buffer length:" + numberOfBytes + ": " + mBuffer.length);
        return -3;
    } else if (numberOfBytes == 0) {
        Log.e(LOG_TAG, "Read zero bytes");
        return -4;
    }
    // Everything seems to be OK, adding the buffer to the recording.
    add(mBuffer);
    return 0;
}
Project: AC2RD    File: RecordFileWriter.java
private boolean stopAudioRecord(Context context)
{
    try
    {
        if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING)
        {
            isAudioRecordRunning = false;
            audioRecord.setRecordPositionUpdateListener(null);
            audioRecord.stop();
            audioRecordRandomAccessFile.seek(4);
            audioRecordRandomAccessFile.writeInt(Integer.reverseBytes(36 + audioRecordPayloadSize)); // 04 - Size of the overall file
            audioRecordRandomAccessFile.seek(40);
            audioRecordRandomAccessFile.writeInt(Integer.reverseBytes(audioRecordPayloadSize)); // 40 - Size of the data section
            audioRecordRandomAccessFile.close();
        }
    }
    catch (Exception e)
    {
        Log.w("RecordFileWriter", "stopAudioRecord : " + context.getString(R.string.log_record_file_writer_error_stop_audiorecord) + " : " + e);
        databaseManager.insertLog(context, "" + context.getString(R.string.log_record_file_writer_error_stop_audiorecord), new Date().getTime(), 2, false);
        return false;
    }

    return true;
}
Project: olami-android-client-sdk    File: RecorderSpeechRecognizer.java
private boolean isRecodingStopped() {
    if (mRecordState == RecordState.STOPPING) {
        // AudioRecord has been released, it means recorder thread is stopped.
        if (mRecord == null) {
            return true;
        } else {
            // Check the recording state to see whether the recorder thread is still running.
            if (mRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
                return true;
            }
        }
    } else if (mRecordState == RecordState.STOPPED) {
        return true;
    }

    return false;
}
Project: P-BrainAndroid    File: HotwordDetector.java
public void startListening() {
    synchronized (API_LOCK) {
        if (this.disabled) {
            return;
        }
        if (!isListening) {
            recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
                    RECORDER_SAMPLERATE, RECORDER_CHANNELS,
                    RECORDER_AUDIO_ENCODING, ELEMENTS_TO_RECORD * BYTES_PER_ELEMENT);
            recorder.startRecording();
            isListening = true;
            if (!isRecording) {
                isRecording = true;
                recordingThread = new Thread(new Runnable() {
                    public void run() {
                        detectHotword();
                    }
                }, "AudioRecorder Thread");
                recordingThread.start();
            }
        }
    }
}
Project: P-BrainAndroid    File: HotwordDetector.java
public void stopListening() {
    synchronized (API_LOCK) {
        // stops the recording activity
        isListening = false;
        isRecording = false;
        if (recordingThread != null) {
            try {
                recordingThread.join();
            } catch (InterruptedException e) {
                Log.e(TAG, "Failed to join recordingThread", e);
            }
            recordingThread = null;
        }
        if (null != recorder) {
            if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                recorder.stop();
            }
            recorder.release();
        }
    }
}
Project: sample-googleassistant    File: EmbeddedAssistant.java
@Override
public void run() {
    ByteBuffer audioData = ByteBuffer.allocateDirect(AUDIO_RECORD_BLOCK_SIZE);
    int result = mAudioRecord.read(audioData, audioData.capacity(),
            AudioRecord.READ_BLOCKING);
    if (result < 0) {
        return;
    }
    mRequestHandler.post(new Runnable() {
                                  @Override
                                  public void run() {
                                      mRequestCallback.onAudioRecording();
                                  }
                              });
    mAssistantRequestObserver.onNext(ConverseRequest.newBuilder()
            .setAudioIn(ByteString.copyFrom(audioData))
            .build());
    mAssistantHandler.post(mStreamAssistantRequest);
}
Project: cythara    File: AudioDispatcherFactory.java
/**
 * Create a new AudioDispatcher connected to the default microphone.
 * 
 * @param sampleRate
 *            The requested sample rate.
 * @param audioBufferSize
 *            The size of the audio buffer (in samples).
 * 
 * @param bufferOverlap
 *            The size of the overlap (in samples).
 * @return A new AudioDispatcher
 */
public static AudioDispatcher fromDefaultMicrophone(final int sampleRate,
        final int audioBufferSize, final int bufferOverlap) {
    int minAudioBufferSize = AudioRecord.getMinBufferSize(sampleRate,
            android.media.AudioFormat.CHANNEL_IN_MONO,
            android.media.AudioFormat.ENCODING_PCM_16BIT);
    int minAudioBufferSizeInSamples = minAudioBufferSize / 2;
    if (minAudioBufferSizeInSamples <= audioBufferSize) {
        AudioRecord audioInputStream = new AudioRecord(
                MediaRecorder.AudioSource.MIC, sampleRate,
                android.media.AudioFormat.CHANNEL_IN_MONO,
                android.media.AudioFormat.ENCODING_PCM_16BIT,
                audioBufferSize * 2);

        TarsosDSPAudioFormat format = new TarsosDSPAudioFormat(sampleRate, 16, 1, true, false);

        TarsosDSPAudioInputStream audioStream = new AndroidAudioInputStream(audioInputStream, format);
        // Start recording! Opens the stream.
        audioInputStream.startRecording();
        return new AudioDispatcher(audioStream, audioBufferSize, bufferOverlap);
    } else {
        throw new IllegalArgumentException("Buffer size too small; should be at least "
                + (minAudioBufferSize * 2));
    }
}
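A hedged usage sketch for fromDefaultMicrophone(); the no-op AudioProcessor is purely illustrative, and the wiring relies on AudioDispatcher implementing Runnable as it does in TarsosDSP:

private void startDispatcher() {
    AudioDispatcher dispatcher =
            AudioDispatcherFactory.fromDefaultMicrophone(22050, 1024, 0);
    dispatcher.addAudioProcessor(new AudioProcessor() {
        @Override
        public boolean process(AudioEvent audioEvent) {
            float[] samples = audioEvent.getFloatBuffer(); // PCM frames converted to floats
            // analyse the samples here (pitch detection, FFT, ...)
            return true; // keep the processing chain running
        }

        @Override
        public void processingFinished() {
        }
    });
    new Thread(dispatcher, "Audio Dispatcher").start();
}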
Project: PLDroidRTCStreaming    File: ExtAudioCapture.java
public void stopCapture() {
    if (!mIsCaptureStarted) {
        return;
    }

    mIsLoopExit = true;
    try {
        mCaptureThread.interrupt();
        mCaptureThread.join(1000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    if (mAudioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
        mAudioRecord.stop();
    }

    mAudioRecord.release();

    mIsCaptureStarted = false;
    mOnAudioFrameCapturedListener = null;

    Log.d(TAG, "Stop audio capture success !");
}
Project: PLDroidRTCStreaming    File: ExtAudioCapture.java
@Override
public void run() {
    while (!mIsLoopExit) {
        byte[] buffer = new byte[SAMPLES_PER_FRAME * 2];
        int ret = mAudioRecord.read(buffer, 0, buffer.length);
        if (ret == AudioRecord.ERROR_INVALID_OPERATION) {
            Log.e(TAG, "Error ERROR_INVALID_OPERATION");
        } else if (ret == AudioRecord.ERROR_BAD_VALUE) {
            Log.e(TAG, "Error ERROR_BAD_VALUE");
        } else {
            if (mOnAudioFrameCapturedListener != null) {
                mOnAudioFrameCapturedListener.onAudioFrameCaptured(buffer, System.nanoTime());
            }
        }
    }
}
Project: RapidSphinx    File: RapidRecognizer.java
public RecognizerThread(int timeout) {
    if (timeout != NO_TIMEOUT) {
        this.timeoutSamples = timeout * sampleRate / 1000;
    } else {
        this.timeoutSamples = NO_TIMEOUT;
    }
    this.remainingSamples = this.timeoutSamples;
    // 6 = AudioSource.VOICE_RECOGNITION, 16 = AudioFormat.CHANNEL_IN_MONO, 2 = AudioFormat.ENCODING_PCM_16BIT
    recorder = new AudioRecord(6, sampleRate, 16, 2, bufferSize * 2);
    if (recorder.getState() == AudioRecord.STATE_UNINITIALIZED) {
        recorder.release();
        try {
            throw new IOException(
                    "Failed to initialize recorder. Microphone might be already in use.");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Project: LittleBitLouder    File: TOne.java
public AudioRecord findAudioRecord() {
    for (int rate : mSampleRates) {
        for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_16BIT }) {
            for (short channelConfig : new short[] { AudioFormat.CHANNEL_IN_MONO }) {
                try {
                    Log.d("C.TAG", "Attempting rate " + rate + "Hz, bits: " + audioFormat
                            + ", channel: " + channelConfig);
                    int bufferSize = AudioRecord.getMinBufferSize(rate,
                            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);

                    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        // check if we can instantiate and have a success
                        AudioRecord recorder = new AudioRecord(AudioSource.MIC, rate,
                                channelConfig, audioFormat, bufferSize);

                        if (recorder.getState() == AudioRecord.STATE_INITIALIZED)
                            return recorder;
                        recorder.release(); // avoid leaking a failed instance
                    }
                } catch (Exception e) {
                    Log.e("C.TAG", rate + " Exception, keep trying.", e);
                }
            }
        }
    }
    return null;
}
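A hedged sketch of using findAudioRecord(); the one-shot read and the 2048-sample buffer are illustrative assumptions rather than TOne's actual read path:

private void captureOnce() {
    AudioRecord recorder = findAudioRecord();
    if (recorder == null) {
        Log.e("C.TAG", "no usable AudioRecord configuration found");
        return;
    }
    short[] buffer = new short[2048]; // arbitrary chunk size for this sketch
    recorder.startRecording();
    int read = recorder.read(buffer, 0, buffer.length); // blocking read of 16-bit samples
    if (read > 0) {
        // hand the samples to the tone detector
    }
    recorder.stop();
    recorder.release();
}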
Project: Alexa-Voice-Service    File: RecordAudioinBytes.java
public void start()
{
    if(getState()==AudioRecord.STATE_INITIALIZED)
    {
        startRecording();
        if(getRecordingState()==AudioRecord.RECORDSTATE_RECORDING)
        {
            new Thread() {
                public void run() {
                    recorderLoop();
                }
            }.start();
        } else {
            Log.e("startRecording()", " failed");
        }
    } else {
        Log.e("start()"," called on illegal state");
    }

}
Project: CXJPadProject    File: ExtAudioRecorder.java
/**
 * 
 * 
 * Resets the recorder to the INITIALIZING state, as if it was just created.
 * In case the class was in RECORDING state, the recording is stopped. In
 * case of exceptions the class is set to the ERROR state.
 * 
 */
public void reset() {
    try {
        if (state != State.ERROR) {
            release();
            filePath = null; // Reset file path
            cAmplitude = 0; // Reset amplitude
            if (rUncompressed) {
                // nChannels is 1 or 2; +1 maps to the legacy CHANNEL_CONFIGURATION_MONO/STEREO values
                audioRecorder = new AudioRecord(aSource, sRate,
                        nChannels + 1, aFormat, bufferSize);
            } else {
                mediaRecorder = new MediaRecorder();
                mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
                mediaRecorder
                        .setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
                mediaRecorder
                        .setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
            }
            state = State.INITIALIZING;
        }
    } catch (Exception e) {
        Log.e(ExtAudioRecorder.class.getName(), e.getMessage());
        state = State.ERROR;
    }
}
Project: ScreenRecordCaptureMaster    File: YXMicRecorder.java
public void record() {
    final ByteBuffer bytebuffer = ByteBuffer.allocateDirect(SAMPLES_PER_FRAME);
    int bufferReadResult;

    while (isRecording) {
        bytebuffer.clear();
        bufferReadResult = mAudioRecord.read(bytebuffer, SAMPLES_PER_FRAME);

        if (bufferReadResult == AudioRecord.ERROR_INVALID_OPERATION || bufferReadResult == AudioRecord.ERROR_BAD_VALUE) {
            // read error: drop this frame and keep capturing
        } else if (bufferReadResult >= 0) {
            //LogUtil.d(TAG, "bytes read "+bufferReadResult);
            // hand this frame to the audio encoder
            bytebuffer.position(bufferReadResult);
            bytebuffer.flip();
            yixiaAudioEncoder.encode(bytebuffer, bufferReadResult, yixiaAudioEncoder.getPTSUs());
        }
    }
}
Project: AndroidRTC    File: WebRtcAudioRecord.java
private boolean startRecording() {
  Logging.d(TAG, "startRecording");
  assertTrue(audioRecord != null);
  assertTrue(audioThread == null);
  try {
    audioRecord.startRecording();
  } catch (IllegalStateException e) {
    reportWebRtcAudioRecordStartError("AudioRecord.startRecording failed: " + e.getMessage());
    return false;
  }
  if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
    reportWebRtcAudioRecordStartError("AudioRecord.startRecording failed - incorrect state :"
        + audioRecord.getRecordingState());
    return false;
  }
  audioThread = new AudioRecordThread("AudioRecordJavaThread");
  audioThread.start();
  return true;
}
Project: EvilsLive    File: AudioCapture.java
private int getMinBufferSize(int sampleRate, int channelConfig, int audioFormat) {
    int numOfChannels, bitsPerSample;
    if (channelConfig == AudioFormat.CHANNEL_IN_MONO) {
        numOfChannels = 1;
    } else {
        numOfChannels = 2;
    }
    if (AudioFormat.ENCODING_PCM_16BIT == audioFormat) {
        bitsPerSample = 16;
    } else {
        bitsPerSample = 8;
    }
    int periodInFrames = sampleRate * TIMER_INTERVAL / 1000;  // the number of frames in one second equals the sample rate
    // refer to android/4.1.1/frameworks/av/media/libmedia/AudioRecord.cpp, AudioRecord::getMinFrameCount method
    // multiply by 2 for ping-pong use of the record buffer
    mMinBufferSize = periodInFrames * 2 * numOfChannels * bitsPerSample / 8;
    if (mMinBufferSize < AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat)) {
        // Check to make sure buffer size is not smaller than the smallest allowed one
        mMinBufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
        // Set frame period and timer interval accordingly
//        periodInFrames = mMinBufferSize / ( 2 * bitsPerSample * numOfChannels / 8 );
    }

    return mMinBufferSize;
}
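A worked instance of the arithmetic above, under the assumption that TIMER_INTERVAL is 120 ms (the constant's real value in AudioCapture is not shown on this page):

static int exampleBufferSize() {
    // Worked example of the computation above, assuming TIMER_INTERVAL = 120 ms.
    int sampleRate = 44100;
    int timerIntervalMs = 120;
    int numOfChannels = 1;   // CHANNEL_IN_MONO
    int bitsPerSample = 16;  // ENCODING_PCM_16BIT
    int periodInFrames = sampleRate * timerIntervalMs / 1000;        // 5292 frames
    return periodInFrames * 2 * numOfChannels * bitsPerSample / 8;   // 21168 bytes
}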
Project: EvilsLive    File: AudioCapture.java
public void stopCapture() {
    if (!mIsCaptureStarted) {
        return;
    }

    mIsLoopExit = true;

    try {
        mCaptureThread.interrupt();
        mCaptureThread.join(1000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    if (mAudioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
        mAudioRecord.stop();
    }

    mAudioRecord.release();
    mIsCaptureStarted = false;
    mAudioFrameCapturedListener = null;

    Log.d(TAG, "Stop audio capture success !");

}
Project: EvilsLive    File: AudioCapture.java
@Override
public void run() {
    while (!mIsLoopExit) {
        byte[] buffer = new byte[mMinBufferSize];

        int ret = mAudioRecord.read(buffer, 0, mMinBufferSize);

        if (ret == AudioRecord.ERROR_INVALID_OPERATION) {
            Log.e(TAG, "Error ERROR_INVALID_OPERATION");
        } else if (ret == AudioRecord.ERROR_BAD_VALUE) {
            Log.e(TAG, "Error ERROR_BAD_VALUE");
        } else {
            if (mAudioFrameCapturedListener != null) {
                mAudioFrameCapturedListener.onAudioFrameCaptured(buffer);
            }
            Log.d(TAG, "OK, Captured " + ret + " bytes !");
        }
        SystemClock.sleep(10);
    }

}
Project: black-mirror    File: VoiceRecorder.java
/**
 * Creates a new {@link AudioRecord}.
 *
 * @return A newly created {@link AudioRecord}, or null if it cannot be created (missing
 * permissions?).
 */
private AudioRecord createAudioRecord() {
    for (int sampleRate : SAMPLE_RATE_CANDIDATES) {
        final int sizeInBytes = AudioRecord.getMinBufferSize(sampleRate, CHANNEL, ENCODING);
        if (sizeInBytes == AudioRecord.ERROR_BAD_VALUE) {
            continue;
        }
        final AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
                sampleRate, CHANNEL, ENCODING, sizeInBytes);
        if (audioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
            mBuffer = new byte[sizeInBytes];
            return audioRecord;
        } else {
            audioRecord.release();
        }
    }
    return null;
}
Project: memento-app    File: Recorder.java
private void startRecording(final String filename) {
    recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
            RECORDER_SAMPLERATE,
            RECORDER_CHANNELS,
            RECORDER_AUDIO_ENCODING,
            bufferSize);
    if (recorder.getState() == AudioRecord.STATE_INITIALIZED)
        recorder.startRecording();

    isRecording = true;

    recordingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            writeAudioDataToFile(filename);
        }
    }, "AudioRecorder Thread");

    recordingThread.start();
}
Project: hack_sjtu_2017    File: LiuLiShuo.java
@Override
public void run() {
    byte[] buffer = new byte[audioBufferSize];
    recorder = new AudioRecord(audioSource, sampleRate, channel, audioFormat, audioBufferSize);
    try {
        // getState() is fixed after construction, so this loop spins until interrupted
        // if the recorder failed to initialize (e.g. RECORD_AUDIO permission missing)
        while (recorder.getState() != AudioRecord.STATE_INITIALIZED)
            Thread.sleep(100, 0);
    } catch (InterruptedException e) {
        recorder.release();
        return;
    }

    recorder.startRecording();
    for (; ; ) {
        int length = recorder.read(buffer, 0, buffer.length);
        if (length < 0)
            Log.e("Record", "error: " + Integer.toString(length));
        else {
            // note: the whole buffer is sent even when fewer than buffer.length bytes were read
            for (WebSocket ws : wss)
                ws.sendBinary(buffer);
        }
        if (Thread.interrupted()) {
            recorder.stop();
            return;
        }
    }
}
Project: AC2RD    File: RecordFileWriter.java
private boolean prepareAudioRecord(Context context, File recordFile, int audioRecordChannelConfig, int chanelNumber, int audioRecordAudioFormat, int nbBitsPerSample)
{
    try
    {
        if (audioRecord.getState() == AudioRecord.STATE_INITIALIZED)
        {
            audioRecordRandomAccessFile = new RandomAccessFile(recordFile.getAbsolutePath(), "rw");
            audioRecordRandomAccessFile.setLength(0);
            audioRecordRandomAccessFile.writeBytes("RIFF"); // 00 - Marks the file as a riff file
            audioRecordRandomAccessFile.writeInt(0); // 04 - Size of the overall file
            audioRecordRandomAccessFile.writeBytes("WAVE"); // 08 - File Type Header
            audioRecordRandomAccessFile.writeBytes("fmt "); // 12 - Format chunk marker
            audioRecordRandomAccessFile.writeInt(Integer.reverseBytes(16)); // 16 - Length of format data as listed above
            audioRecordRandomAccessFile.writeShort(Short.reverseBytes((short) 1)); // 20 - Type of format
            audioRecordRandomAccessFile.writeShort(Short.reverseBytes((short) chanelNumber)); // 22 - Number of Channels
            audioRecordRandomAccessFile.writeInt(Integer.reverseBytes(audioRecordSampleRate)); // 24 - Sample Rate
            audioRecordRandomAccessFile.writeInt(Integer.reverseBytes(audioRecordSampleRate * chanelNumber * (short)nbBitsPerSample / 8)); // 28 - ByteRate
            audioRecordRandomAccessFile.writeShort(Short.reverseBytes((short)(chanelNumber * nbBitsPerSample / 8))); // 32 - Alignment
            audioRecordRandomAccessFile.writeShort(Short.reverseBytes((short) nbBitsPerSample)); // 34 - Bits per sample
            audioRecordRandomAccessFile.writeBytes("data"); // 36 - "data" chunk header
            audioRecordRandomAccessFile.writeInt(0); // 40 - Size of the data section
            audioRecordBuffer = new byte[audioRecordPeriodInFrames * (short)nbBitsPerSample / 8 * chanelNumber];
        }
        else
        {
            Log.w("RecordFileWriter", "prepareAudioRecord : " + context.getString(R.string.log_record_file_writer_error_audiorecord_prepare));
            databaseManager.insertLog(context, "" + context.getString(R.string.log_record_file_writer_error_audiorecord_prepare), new Date().getTime(), 2, false);
            return false;
        }
    }
    catch (Exception e)
    {
        Log.w("RecordFileWriter", "prepareAudioRecord : " + context.getString(R.string.log_record_file_writer_error_audiorecord_prepare) + " : " + e);
        databaseManager.insertLog(context, "" + context.getString(R.string.log_record_file_writer_error_audiorecord_prepare), new Date().getTime(), 2, false);
        return false;
    }

    return true;
}
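For a concrete configuration, the derived header fields above come out as follows; 44100 Hz mono 16-bit is only an example, not necessarily what AC2RD records with:

static void exampleWavHeaderValues() {
    // Assumed configuration: audioRecordSampleRate = 44100, chanelNumber = 1, nbBitsPerSample = 16.
    int sampleRate = 44100;
    int channels = 1;
    int bitsPerSample = 16;
    int byteRate = sampleRate * channels * bitsPerSample / 8;   // 88200, written at offset 28
    int blockAlign = channels * bitsPerSample / 8;              // 2, written at offset 32
    // The two zero placeholders (offsets 4 and 40) are patched later by stopAudioRecord()
    // once the payload size is known.
}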
Project: Android_PCM_Record_Play    File: AudioUtil.java
private AudioUtil()
{
    // create the output file
    createFile();
    recorder = new AudioRecord(audioSource , audioRate ,
            audioChannel , audioFormat , bufferSize);
}
Project: AAVT    File: CameraRecorder.java
public void startRecord() throws IOException {
    synchronized (REC_LOCK) {
        isRecordStarted = true;
        MediaFormat audioFormat = mConfig.getAudioFormat();
        mAudioEncoder = MediaCodec.createEncoderByType(audioFormat.getString(MediaFormat.KEY_MIME));
        mAudioEncoder.configure(audioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        MediaFormat videoFormat = mConfig.getVideoFormat();
        mVideoEncoder = MediaCodec.createEncoderByType(videoFormat.getString(MediaFormat.KEY_MIME));
        // mOutputSurface cannot be used here; configure() would fail
        mVideoEncoder.configure(videoFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        mEncodeSurface = mVideoEncoder.createInputSurface();

        mAudioEncoder.start();
        mVideoEncoder.start();
        mMuxer = new MediaMuxer(mOutputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
        mRecordBufferSize = AudioRecord.getMinBufferSize(mRecordSampleRate,
                mRecordChannelConfig, mRecordAudioFormat) * 2;
//        buffer=new byte[bufferSize];
        mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, mRecordSampleRate, mRecordChannelConfig,
                mRecordAudioFormat, mRecordBufferSize);

        mAudioThread = new Thread(new Runnable() {
            @Override
            public void run() {
                mAudioRecord.startRecording();
                while (!audioEncodeStep(isTryStopAudio)) {}
                mAudioRecord.stop();
            }
        });
        mAudioThread.start();
        isRecordAudioStarted = true;
    }
}
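audioEncodeStep() is not shown on this page; a rough sketch of the AudioRecord-to-MediaCodec hand-off it presumably performs (the method name, timeout and the omitted muxer drain are assumptions):

private boolean audioEncodeStepSketch(boolean endOfStream) {
    // Hypothetical: feed one chunk of PCM from mAudioRecord into mAudioEncoder.
    int inIndex = mAudioEncoder.dequeueInputBuffer(10000); // 10 ms timeout
    if (inIndex >= 0) {
        ByteBuffer input = mAudioEncoder.getInputBuffer(inIndex);
        input.clear();
        int read = mAudioRecord.read(input, Math.min(input.remaining(), mRecordBufferSize));
        int flags = endOfStream ? MediaCodec.BUFFER_FLAG_END_OF_STREAM : 0;
        mAudioEncoder.queueInputBuffer(inIndex, 0, Math.max(read, 0),
                System.nanoTime() / 1000, flags);
    }
    // Draining the encoder output into mMuxer is omitted from this sketch.
    return endOfStream;
}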
Project: dcs-sdk-java    File: AudioRecordThread.java
public AudioRecordThread(LinkedBlockingDeque<byte[]> linkedBlockingDeque) {
    this.linkedBlockingDeque = linkedBlockingDeque;
    bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE_HZ, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, SAMPLE_RATE_HZ, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize);
}
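Only the constructor is shown above; a minimal sketch of the read loop such a thread typically runs, assuming AudioRecordThread extends Thread and that a consumer drains linkedBlockingDeque elsewhere:

@Override
public void run() {
    // Hypothetical capture loop feeding PCM chunks into linkedBlockingDeque.
    audioRecord.startRecording();
    byte[] buffer = new byte[bufferSize];
    while (!isInterrupted()) {
        int read = audioRecord.read(buffer, 0, buffer.length);
        if (read > 0) {
            // copy, because the same buffer is reused on the next iteration
            byte[] chunk = new byte[read];
            System.arraycopy(buffer, 0, chunk, 0, read);
            linkedBlockingDeque.offer(chunk);
        }
    }
    audioRecord.stop();
    audioRecord.release();
}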
Project: libRtmp    File: ControllerAudio.java
AudioProcessor(AudioRecord audioRecord) {
    mRecordBufferSize = AndroidUntil.getRecordBufferSize();
    mRecordBuffer =  new byte[mRecordBufferSize];
    mAudioRecord = audioRecord;
    mAudioEncoder = new AudioEncoder();
    mAudioEncoder.prepareEncoder();
}
Project: libRtmp    File: AndroidUntil.java
public static int getRecordBufferSize() {
    int frequency = Options.getInstance().audio.frequency;
    int audioEncoding = Options.getInstance().audio.encoding;
    int channelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
    if(Options.getInstance().audio.channelCount == 2) {
        channelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
    }
    return AudioRecord.getMinBufferSize(frequency, channelConfiguration, audioEncoding);
}
Project: libRtmp    File: AndroidUntil.java
@TargetApi(18)
public static AudioRecord getAudioRecord() {
    int frequency = Options.getInstance().audio.frequency;
    int audioEncoding = Options.getInstance().audio.encoding;
    int channelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
    if(Options.getInstance().audio.channelCount == 2) {
        channelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
    }
    int audioSource = MediaRecorder.AudioSource.MIC;
    if(Options.getInstance().audio.aec) {
        audioSource = MediaRecorder.AudioSource.VOICE_COMMUNICATION;
    }
    return new AudioRecord(audioSource, frequency,
            channelConfiguration, audioEncoding, getRecordBufferSize());
}
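A hedged example of wiring the two helpers above together; the single blocking read stands in for the real capture loop:

static void captureOneChunk() {
    // Hypothetical wiring of the two helpers above; a real pipeline would loop.
    AudioRecord audioRecord = AndroidUntil.getAudioRecord();
    byte[] pcm = new byte[AndroidUntil.getRecordBufferSize()];
    audioRecord.startRecording();
    int read = audioRecord.read(pcm, 0, pcm.length); // blocking read of one chunk
    if (read > 0) {
        // hand the chunk to AudioEncoder / ControllerAudio
    }
    audioRecord.stop();
    audioRecord.release();
}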