Java Class android.media.AudioTrack Code Examples
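
This page collects real-world usage examples of android.media.AudioTrack from open-source projects. Most follow the same pattern: query AudioTrack.getMinBufferSize(), construct a track in MODE_STREAM or MODE_STATIC, call play(), write PCM data, then stop() and release(). Before the project snippets, here is a minimal self-contained sketch of that pattern (the 16 kHz mono 16-bit configuration is illustrative, not taken from any project below):

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

public class MinimalTonePlayer {
    // Streams one second of a 440 Hz sine tone through AudioTrack.
    public static void playTone() {
        final int sampleRate = 16000;
        int minBufferSize = AudioTrack.getMinBufferSize(sampleRate,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                minBufferSize, AudioTrack.MODE_STREAM);
        short[] samples = new short[sampleRate];   // one second of mono samples
        for (int i = 0; i < samples.length; i++) {
            samples[i] = (short) (10000 * Math.sin(2.0 * Math.PI * 440.0 * i / sampleRate));
        }
        track.play();                              // start the track ...
        track.write(samples, 0, samples.length);   // ... then stream PCM into it
        track.stop();
        track.release();                           // always release the native resources
    }
}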

Project: GoogleAssistantSDK    File: SpeechService.java
@Override
public void onCreate() {
    super.onCreate();
    mHandler = new Handler();
    fetchAccessToken();

    int outputBufferSize = AudioTrack.getMinBufferSize(16000,
            AudioFormat.CHANNEL_OUT_MONO,   // match the output track below (the original passed CHANNEL_IN_STEREO, an input mask)
            AudioFormat.ENCODING_PCM_16BIT);

    try {
        mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,   // USE_DEFAULT_STREAM_TYPE is not a valid stream type for this constructor
                16000, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                outputBufferSize, AudioTrack.MODE_STREAM);
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
            mAudioTrack.setVolume(DEFAULT_VOLUME);
        }
        mAudioTrack.play();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Project: Ftc2018RelicRecovery    File: FtcAndroidTone.java
/**
 * This method plays the sound data in the specified buffer.
 *
 * @param buffer specifies the sound data buffer.
 */
public void playSound(short[] buffer)
{
    final String funcName = "playSound";

    if (debugEnabled)
    {
        dbgTrace.traceEnter(funcName, TrcDbgTrace.TraceLevel.API);
        dbgTrace.traceExit(funcName, TrcDbgTrace.TraceLevel.API);
    }

    audioTrack = new AudioTrack(
            AudioManager.STREAM_MUSIC,
            sampleRate,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            buffer.length*2,    //buffer length in bytes
            AudioTrack.MODE_STATIC);
    audioTrack.write(buffer, 0, buffer.length);
    audioTrack.setNotificationMarkerPosition(buffer.length);
    audioTrack.setPlaybackPositionUpdateListener(this);
    audioTrack.play();
    playing = true;
}
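
A plausible caller sketch for playSound() (hypothetical, not from the FTC source; 'tone' and the 16 kHz rate are assumptions): generate a short PCM clip and hand it to the method, which plays it via a MODE_STATIC track and reports completion through onMarkerReached().

// Hypothetical usage sketch: 200 ms of an 880 Hz tone at an assumed 16 kHz rate.
int sampleRate = 16000;                        // must match the tone generator's sampleRate field
short[] buffer = new short[sampleRate / 5];    // 200 ms of samples
for (int i = 0; i < buffer.length; i++)
{
    buffer[i] = (short) (0.8 * Short.MAX_VALUE * Math.sin(2.0 * Math.PI * 880.0 * i / sampleRate));
}
tone.playSound(buffer);                        // 'tone' is an FtcAndroidTone instance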
Project: FtcSamples    File: FtcAndroidTone.java
/**
 * This method plays the sound data in the specified buffer.
 *
 * @param buffer specifies the sound data buffer.
 */
public void playSound(short[] buffer)
{
    final String funcName = "playSound";

    if (debugEnabled)
    {
        dbgTrace.traceEnter(funcName, TrcDbgTrace.TraceLevel.API);
        dbgTrace.traceExit(funcName, TrcDbgTrace.TraceLevel.API);
    }

    audioTrack = new AudioTrack(
            AudioManager.STREAM_MUSIC,
            sampleRate,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            buffer.length*2,    //buffer length in bytes
            AudioTrack.MODE_STATIC);
    audioTrack.write(buffer, 0, buffer.length);
    audioTrack.setNotificationMarkerPosition(buffer.length);
    audioTrack.setPlaybackPositionUpdateListener(this);
    audioTrack.play();
    playing = true;
}
Project: AndroidSdrRtlTuner    File: AudioSink.java
/**
 * Constructor. Will create a new AudioSink.
 *
 * @param packetSize    size of the incoming packets
 * @param sampleRate    sample rate of the audio signal
 */
public AudioSink (int packetSize, int sampleRate) {
    this.packetSize = packetSize;
    this.sampleRate = sampleRate;

    // Create the queues and fill the output queue with empty sample packets:
    this.inputQueue = new ArrayBlockingQueue<SamplePacket>(QUEUE_SIZE);
    this.outputQueue = new ArrayBlockingQueue<SamplePacket>(QUEUE_SIZE);
    for (int i = 0; i < QUEUE_SIZE; i++)
        this.outputQueue.offer(new SamplePacket(packetSize));

    // Create an instance of the AudioTrack class:
    int bufferSize = AudioTrack.getMinBufferSize(sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    this.audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, AudioFormat.CHANNEL_OUT_MONO,
                                AudioFormat.ENCODING_PCM_16BIT, bufferSize, AudioTrack.MODE_STREAM);

    // Create the audio filters:
    this.audioFilter1 = FirFilter.createLowPass(2, 1, 1, 0.1f, 0.15f, 30);
    Log.d(LOGTAG,"constructor: created audio filter 1 with " + audioFilter1.getNumberOfTaps() + " Taps.");
    this.audioFilter2 = FirFilter.createLowPass(4, 1, 1, 0.1f, 0.1f, 30);
    Log.d(LOGTAG,"constructor: created audio filter 2 with " + audioFilter2.getNumberOfTaps() + " Taps.");
    this.tmpAudioSamples = new SamplePacket(packetSize);
}
Project: sonicky    File: Encoder.java
private void init_(boolean eccEnabled) {
    mEccEncoder = EccInstanceProvider.getEncoder(eccEnabled);
    int minBufferSizeInBytes = AudioTrack.getMinBufferSize(
            RATE,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    // 44.1kHz mono 16bit
    mAudioTrack = new AudioTrack(
            AudioManager.STREAM_MUSIC,
            RATE,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            minBufferSizeInBytes,
            AudioTrack.MODE_STREAM);
    mExecutorService = Executors.newSingleThreadExecutor();
}
Project: dcs-sdk-java    File: AudioTrackPlayerImpl.java
private int getMinBufferSize(int sampleRate, int channelConfig, int audioFormat) {
    minBufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig, audioFormat);
    // Work around IllegalArgumentException: Invalid audio buffer size
    int channelCount = 1;
    switch (channelConfig) {
        // AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_DEFAULT:
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            channelCount = 1;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            channelCount = 2;
            break;
        default:
            channelCount = Integer.bitCount(channelConfig);
    }
    // Check whether minBufferSize is frame-aligned and positive; if not, fall back to a default of 1152
    int frameSizeInBytes = channelCount * (audioFormat == AudioFormat.ENCODING_PCM_8BIT ? 1 : 2);
    if ((minBufferSize % frameSizeInBytes != 0) || (minBufferSize < 1)) {
        minBufferSize = 1152;
    }
    return minBufferSize;
}
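
For example, with stereo 16-bit PCM the frame size is 2 channels × 2 bytes = 4 bytes, so a reported minBufferSize of 4098 bytes would fail the alignment check (4098 % 4 != 0) and be replaced by the 1152-byte fallback.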
Project: dcs-sdk-java    File: AudioTrackPlayerImpl.java
@Override
public void stop() {
    getAudioTrackCurrentPosition();
    mCurrentState = PlayState.STOPPED;
    if (writeWorkThread != null) {
        writeWorkThread.stopWrite();
    }
    try {
        Log.d(TAG, "stop-PlayState:" + mAudioTrack.getPlayState());
        if (mAudioTrack != null && mAudioTrack.getPlayState() != AudioTrack.STATE_UNINITIALIZED) {
            mAudioTrack.pause();
            mAudioTrack.flush();
            Log.d(TAG, "stop-ok");
        }
    } catch (Exception e) {
        e.printStackTrace();
        Log.d(TAG, "stop()", e);
    }
    fireStopped();
}
Project: dcs-sdk-java    File: AudioTrackPlayerImpl.java
@Override
public void release() {
    mCurrentState = PlayState.IDLE;
    if (writeWorkThread != null) {
        writeWorkThread.stopWrite();
    }
    try {
        Log.d(TAG, "release-PlayState:" + mAudioTrack.getPlayState());
        if (mAudioTrack != null && mAudioTrack.getPlayState() != AudioTrack.STATE_UNINITIALIZED) {
            mAudioTrack.pause();
            mAudioTrack.flush();
            mAudioTrack.stop();
            mAudioTrack.release();
            Log.d(TAG, "release-ok");
        }
    } catch (Exception e) {
        e.printStackTrace();
        Log.d(TAG, "release()", e);
    }
    fireOnRelease();
    mediaPlayerListeners.clear();
    handlerMain.removeCallbacksAndMessages(null);
}
Project: AssistantBySDK    File: PcmPlayer.java
public PcmPlayer(Context context, Handler handler) {
    this.mContext = context;
    this.audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
            wBufferSize, AudioTrack.MODE_STREAM);
    this.handler = handler;
    audioTrack.setPlaybackPositionUpdateListener(this, handler);
    cacheDir = context.getExternalFilesDir(Environment.DIRECTORY_MUSIC);
}
Project: AssistantBySDK    File: PcmPlayer.java
@Override
public void onMarkerReached(AudioTrack track) {
    Log.i(TAG, "onMarkerReached>>>" + track.getNotificationMarkerPosition());
    if (playLock.tryLock()) {
        try {
            playCondition.signalAll();
        } finally {
            playLock.unlock();
        }
    }
    Log.i(TAG, "PCM SIZE=" + pcms.size());
    if (!pending.get() && pcms.size() == 0) {
        play.set(false);
        playListener.onCompleted();
    }
}
Project: buildAPKsSamples    File: SimpleAudioOutput.java
public AudioTrack createAudioTrack(int frameRate) {
    int minBufferSizeBytes = AudioTrack.getMinBufferSize(frameRate,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    Log.i(TAG, "AudioTrack.minBufferSize = " + minBufferSizeBytes
            + " bytes = " + (minBufferSizeBytes / BYTES_PER_FRAME)
            + " frames");
    // The numerator can be raised (e.g. 8 -> 16) to enlarge the buffer and trade
    // latency for underrun safety; as written this equals minBufferSizeBytes.
    int bufferSize = 8 * minBufferSizeBytes / 8;
    int outputBufferSizeFrames = bufferSize / BYTES_PER_FRAME;
    Log.i(TAG, "actual bufferSize = " + bufferSize + " bytes = "
            + outputBufferSizeFrames + " frames");

    AudioTrack player = new AudioTrack(AudioManager.STREAM_MUSIC,
            frameRate, AudioFormat.CHANNEL_OUT_STEREO,   // use the frameRate argument, matching getMinBufferSize() above
            AudioFormat.ENCODING_PCM_FLOAT, bufferSize,
            AudioTrack.MODE_STREAM);
    Log.i(TAG, "created AudioTrack");
    return player;
}
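
A hypothetical way to drive the returned float-PCM track (sketch only; AudioTrack.write(float[], ...) with WRITE_BLOCKING requires API 21+, and the 48 kHz rate is an assumption):

AudioTrack player = createAudioTrack(48000);
player.play();
float[] chunk = new float[2 * 480];            // 10 ms of interleaved stereo frames at 48 kHz
for (int i = 0; i < chunk.length / 2; i++) {
    float v = (float) Math.sin(2.0 * Math.PI * 440.0 * i / 48000.0);
    chunk[2 * i] = v;                          // left channel
    chunk[2 * i + 1] = v;                      // right channel
}
for (int n = 0; n < 100; n++) {                // ~1 s; the phase jump at each chunk boundary may click
    player.write(chunk, 0, chunk.length, AudioTrack.WRITE_BLOCKING);
}
player.stop();
player.release();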
Project: perfectTune    File: TuneThread.java
@Override
public void run() {
    super.run();
    isRunning = true;
    int buffsize = AudioTrack.getMinBufferSize(sr,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    // create an audiotrack object
    AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            sr, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, buffsize,
            AudioTrack.MODE_STREAM);

    short[] samples = new short[buffsize];   // note: buffsize is in bytes, so as a short[] this is twice the minimum
    int amp = 10000;
    double twopi = 8. * Math.atan(1.);
    double ph = 0.0;

    // start audio
    audioTrack.play();

    // synthesis loop
    while(isRunning){
        double fr = tuneFreq;
        for(int i=0; i < buffsize; i++){
            samples[i] = (short) (amp*Math.sin(ph));
            ph += twopi*fr/sr;
        }
        audioTrack.write(samples, 0, buffsize);
    }
    audioTrack.stop();
    audioTrack.release();
}
Project: phonk    File: PWave.java
public PWave(AppRunner appRunner) {
    super(appRunner);
    appRunner.whatIsRunning.add(this);

    // set the buffer size
    buffsize = AudioTrack.getMinBufferSize(mSampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);

    samples = new short[buffsize];   // buffsize is in bytes; as a short[] this is twice the minimum size

    // create an audiotrack object
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            mSampleRate, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, buffsize,
            AudioTrack.MODE_STREAM);

    // start audio
    audioTrack.play();
}
Project: EvilsLive    File: AudioPlayer.java
public boolean startPlayer(int streamType, int sampleRateInHz, int channelConfig, int audioFormat) {

    if (mIsPlayStarted) {
        Log.e(TAG, "Player already started !");
        return false;
    }

    mMinBufferSize = AudioTrack.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    if (mMinBufferSize == AudioTrack.ERROR_BAD_VALUE) {
        Log.e(TAG, "Invalid parameter !");
        return false;
    }
    Log.d(TAG, "getMinBufferSize = " + mMinBufferSize + " bytes !");

    mAudioTrack = new AudioTrack(streamType, sampleRateInHz, channelConfig, audioFormat, mMinBufferSize, DEFAULT_PLAY_MODE);
    if (mAudioTrack.getState() == AudioTrack.STATE_UNINITIALIZED) {
        Log.e(TAG, "AudioTrack initialize fail !");
        return false;
    }

    mIsPlayStarted = true;

    Log.d(TAG, "Start audio player success !");

    return true;
}
Project: libcommon    File: MediaAudioDecoder.java
@TargetApi(Build.VERSION_CODES.JELLY_BEAN)
@Override
protected int handlePrepare(MediaExtractor media_extractor) {
    int track_index = selectTrack(media_extractor, "audio/");
    if (track_index >= 0) {
        final MediaFormat format = media_extractor.getTrackFormat(track_index);
        mAudioChannels = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
        mAudioSampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
        final int min_buf_size = AudioTrack.getMinBufferSize(mAudioSampleRate,
                (mAudioChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO),
                AudioFormat.ENCODING_PCM_16BIT);
        final int max_input_size = format.getInteger(MediaFormat.KEY_MAX_INPUT_SIZE);
        mAudioInputBufSize = min_buf_size > 0 ? min_buf_size * mAudioChannels * 2 : max_input_size;
        if (mAudioInputBufSize > max_input_size) mAudioInputBufSize = max_input_size;
        final int frameSizeInBytes = mAudioChannels * 2;
        mAudioInputBufSize = (mAudioInputBufSize / frameSizeInBytes) * frameSizeInBytes;
        if (DEBUG) Log.v(TAG, String.format("getMinBufferSize=%d,max_input_size=%d,mAudioInputBufSize=%d",min_buf_size, max_input_size, mAudioInputBufSize));
    }
    return track_index;
}
Project: Android-Audio-Recorder    File: Sound.java
public AudioTrack generateTrack(int sampleRate, short[] buf, int len) {
    int end = len;

    int c = 0;

    if (RawSamples.CHANNEL_CONFIG == AudioFormat.CHANNEL_IN_MONO)
        c = AudioFormat.CHANNEL_OUT_MONO;

    if (RawSamples.CHANNEL_CONFIG == AudioFormat.CHANNEL_IN_STEREO)
        c = AudioFormat.CHANNEL_OUT_STEREO;

    // Workaround for a bug on old phones: with MODE_STATIC the listener set via
    // setNotificationMarkerPosition is never called, so MODE_STREAM is used instead.
    // http://stackoverflow.com/questions/27602492
    AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
            c, RawSamples.AUDIO_FORMAT,
            len * (Short.SIZE / 8), AudioTrack.MODE_STREAM);
    track.write(buf, 0, len);
    if (track.setNotificationMarkerPosition(end) != AudioTrack.SUCCESS)
        throw new RuntimeException("unable to set marker");
    return track;
}
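
A hypothetical caller for generateTrack() (sketch only; 'sound' and the buffer are assumptions), showing why the marker matters: the listener fires once playback passes the end of the written data.

// Hypothetical usage sketch for generateTrack() above.
AudioTrack track = sound.generateTrack(16000, buf, buf.length);   // 'sound' is a Sound instance
track.setPlaybackPositionUpdateListener(new AudioTrack.OnPlaybackPositionUpdateListener() {
    @Override
    public void onMarkerReached(AudioTrack t) {
        t.release();   // clip finished; free the native resources
    }

    @Override
    public void onPeriodicNotification(AudioTrack t) {
        // unused
    }
});
track.play();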
Project: Jigglypuff    File: AndroidAudio.java
private void requestDevice() {
    int bufferSize = (_sampleRate / _ioBaseFrequency / 2);

    // The stereo buffer should be large enough to ensure
    // that scheduling doesn't mess it up.
    _playBuffer = new short[bufferSize * _bitsInBuffer];

    // Open Audio-Player
    _audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, _sampleRate,
            AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT, _bufferSizeInBytes,
            AudioTrack.MODE_STREAM);

    int recBufferSize = AudioRecord.getMinBufferSize(_sampleRate,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);

    _recBuffer = new short[recBufferSize * 10];

    // Open Audio-Recorder
    _audioRecord = new AudioRecord(MediaRecorder.AudioSource.DEFAULT,
            _sampleRate, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, recBufferSize);

}
Project: QRDataTransfer-Android    File: AudioTrackManager.java
/**
 * Sets the tone frequency and starts playback.
 * @param rate the tone frequency in Hz
 */
@SuppressWarnings("deprecation")
public void start(int rate) {
    stop();
    if (rate > 0) {
        Hz = rate;
        waveLen = RATE / Hz;
        length = waveLen * Hz;
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, RATE,
                AudioFormat.CHANNEL_CONFIGURATION_STEREO, // deprecated alias of CHANNEL_OUT_STEREO
                AudioFormat.ENCODING_PCM_8BIT, length, AudioTrack.MODE_STREAM);
        // Generate the sine wave
        wave = SinWave.sin(wave, waveLen, length);
        if (audioTrack != null) {
            audioTrack.play();
        }
    }
}
Project: Viewer    File: AudioThread.java
public AudioThread(int sampleRateInHz, int channel, long streamId, long decoderId, Media media)
{
    if (channel == 1)
    {
        channel_configuration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
    } else
    {
        channel_configuration = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
    }
    this.mediaStreamId = streamId;
    this.decoderId = decoderId;
    this.media = media;
    int minBufferSize = AudioTrack.getMinBufferSize(sampleRateInHz, channel_configuration, AudioFormat.ENCODING_PCM_16BIT);
    if (minBufferSize > audioLength)
    {
        audioLength = minBufferSize;
    }
    mAudioBuffer = new byte[audioLength];
    mAudio = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateInHz, channel_configuration,
            AudioFormat.ENCODING_PCM_16BIT, audioLength, AudioTrack.MODE_STREAM);
}
Project: Android-Guitar-Tuner    File: AndroidAudioPlayer.java
public AndroidAudioPlayer(final AudioConfig audioConfig) {
    AudioAttributes audioAttributes = new AudioAttributes.Builder()
            .setLegacyStreamType(AudioManager.STREAM_MUSIC)
            .setUsage(AudioAttributes.USAGE_MEDIA)
            .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
            .build();

    AudioFormat audioFormat = new AudioFormat.Builder()
            .setChannelMask(audioConfig.getOutputChannel())
            .setEncoding(audioConfig.getOutputFormat())
            .setSampleRate(audioConfig.getSampleRate())
            .build();

    audioTrack = new AudioTrack(audioAttributes,
            audioFormat,
            audioConfig.getOutputBufferSize(),
            AudioTrack.MODE_STATIC,
            AudioManager.AUDIO_SESSION_ID_GENERATE);

    outputByteCount = audioConfig.getOutputFormatByteCount();
}
Project: K-Sonic    File: Track.java
private void initDevice(int sampleRate, int numChannels) {
    if (isJMono)
        numChannels = 2;
    mLock.lock();
    try {
        final int format = findFormatFromChannels(numChannels);
        final int minSize = AudioTrack.getMinBufferSize(sampleRate, format,
                AudioFormat.ENCODING_PCM_16BIT);
        mTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, format,
                AudioFormat.ENCODING_PCM_16BIT, minSize * 4,
                AudioTrack.MODE_STREAM);
        mSonic = new Sonic(sampleRate, numChannels);
    } finally {   // the original caught Exception merely to rethrow it; try/finally is equivalent
        mLock.unlock();
    }
}
Project: ChristmasVoice    File: MediaToolsProvider.java
AudioTrack getAudioTrack(long bufferSize) {
    AudioFormat audioFormat = getAudioFormat(AudioFormat.CHANNEL_OUT_MONO);
    AudioAttributes attributes = new AudioAttributes.Builder()
            .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
            .setUsage(AudioAttributes.USAGE_MEDIA)
            .build();
    AudioTrack track = new AudioTrack.Builder()
            .setAudioFormat(audioFormat)
            .setBufferSizeInBytes((int) bufferSize)
            .setAudioAttributes(attributes)
            .setTransferMode(AudioTrack.MODE_STREAM)
            .build();
    PresetReverb reverb = getReverb();
    track.attachAuxEffect(reverb.getId());
    track.setAuxEffectSendLevel(1.0f);
    return track;
}
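
The getReverb() helper is not shown; a plausible implementation (an assumption, not from the ChristmasVoice source) creates an auxiliary PresetReverb on audio session 0, which is required for attachAuxEffect() to have any audible effect:

// Plausible sketch of the getReverb() helper used above (assumption: not shown
// in the source). Auxiliary effects must be created on audio session 0.
private PresetReverb getReverb() {
    PresetReverb reverb = new PresetReverb(0 /* priority */, 0 /* output mix session */);
    reverb.setPreset(PresetReverb.PRESET_LARGEHALL);
    reverb.setEnabled(true);
    return reverb;
}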
Project: android-sdk    File: StreamPlayer.java
/**
 * Play the given InputStream.
 *
 * @param stream the stream
 */
public void playStream(InputStream stream) {
  try {
    byte[] data = convertStreamToByteArray(stream);
    if (data.length > 28) {
      sampleRate = readInt(data, 24);   // the sample rate lives at byte offset 24 of the WAV header
    }
    int headSize = 44, metaDataSize = 48;
    int destPos = headSize + metaDataSize;   // skip the 44-byte WAV header plus a 48-byte metadata block
    int rawLength = data.length - destPos;
    byte[] d = new byte[rawLength];
    System.arraycopy(data, destPos, d, 0, rawLength);
    initPlayer();
    audioTrack.write(d, 0, d.length);
    stream.close();
    if (audioTrack != null && audioTrack.getState() != AudioTrack.STATE_UNINITIALIZED) {
      audioTrack.release();
    }
  } catch (IOException e2) {
    Log.e(TAG, e2.getMessage());
  }
}
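
initPlayer() is not shown here; a plausible implementation (an assumption, not from the android-sdk source) creates and starts a streaming track at the sample rate read from the WAV header:

// Plausible sketch of the initPlayer() helper used above (assumption: not
// shown in the source).
private void initPlayer() {
    int bufferSize = AudioTrack.getMinBufferSize(sampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
            bufferSize, AudioTrack.MODE_STREAM);
    audioTrack.play();   // start in streaming mode so write() plays immediately
}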
Project: RxAndroidAudio    File: StreamAudioPlayer.java
@WorkerThread
public synchronized boolean play(byte[] data, int size) {
    if (mAudioTrack != null) {
        try {
            int ret = mAudioTrack.write(data, 0, size);
            switch (ret) {
                case AudioTrack.ERROR_INVALID_OPERATION:
                    Log.w(TAG, "play fail: ERROR_INVALID_OPERATION");
                    return false;
                case AudioTrack.ERROR_BAD_VALUE:
                    Log.w(TAG, "play fail: ERROR_BAD_VALUE");
                    return false;
                case AudioManager.ERROR_DEAD_OBJECT:
                    Log.w(TAG, "play fail: ERROR_DEAD_OBJECT");
                    return false;
                default:
                    return true;
            }
        } catch (IllegalStateException e) {
            Log.w(TAG, "play fail: " + e.getMessage());
            return false;
        }
    }
    Log.w(TAG, "play fail: null mAudioTrack");
    return false;
}
Project: 2016wdc    File: StreamPlayer.java
/**
 * Play the given InputStream.
 */
public void playStream(InputStream stream) {
  try {
    byte[] data = convertStreamToByteArray(stream);
    if (data.length > 28) {
      sampleRate = readInt(data, 24);
    }
    int headSize = 44, metaDataSize = 48;
    int destPos = headSize + metaDataSize;
    int rawLength = data.length - destPos;
    byte[] d = new byte[rawLength];
    System.arraycopy(data, destPos, d, 0, rawLength);
    initPlayer();
    audioTrack.write(d, 0, d.length);
    stream.close();
    if (audioTrack != null && audioTrack.getState() != AudioTrack.STATE_UNINITIALIZED) {
      audioTrack.release();
    }
  } catch (IOException e2) {
    Log.e(TAG, e2.getMessage());
  }
}
Project: aacdecoder-android    File: PCMFeed.java
/**
 * Called on the listener to periodically notify it that the playback head
 * has reached a multiple of the notification period. 
 */
public void onPeriodicNotification( AudioTrack track ) {
    if (playerCallback != null) {
        int buffered = 0;

        try {
            buffered = writtenTotal - track.getPlaybackHeadPosition()*channels;
        }
        catch (IllegalStateException e) {
            Log.e( LOG, "onPeriodicNotification(): illegal state=" + track.getPlayState());

            return;
        }

        int ms = samplesToMs( buffered, sampleRate, channels );

        playerCallback.playerPCMFeedBuffer( isPlaying, ms, bufferSizeInMs );
    }
}
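
samplesToMs() is not shown; a plausible implementation (assumption) divides the interleaved sample count by the channel count to get frames, then converts frames to milliseconds:

// Plausible sketch of the samplesToMs() helper used above (assumption: not
// shown in the source). 'samples' counts interleaved samples across channels.
protected static int samplesToMs(int samples, int sampleRate, int channels) {
    return (int) (1000L * samples / (sampleRate * channels));
}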
Project: 2016EduHackathon_SanFrancisco    File: StreamPlayer.java
/**
 * Play the given InputStream.
 */
public void playStream(InputStream stream) {
  try {
    byte[] data = convertStreamToByteArray(stream);
    if (data.length > 28) {
      sampleRate = readInt(data, 24);
    }
    int headSize = 44, metaDataSize = 48;
    int destPos = headSize + metaDataSize;
    int rawLength = data.length - destPos;
    byte[] d = new byte[rawLength];
    System.arraycopy(data, destPos, d, 0, rawLength);
    initPlayer();
    audioTrack.write(d, 0, d.length);
    stream.close();
    if (audioTrack != null && audioTrack.getState() != AudioTrack.STATE_UNINITIALIZED) {
      audioTrack.release();
    }
  } catch (IOException e2) {
    Log.e(TAG, e2.getMessage());
  }
}
Project: androidthings-googleassistant    File: AssistantActivity.java
@Override
public void onNext(ConverseResponse value) {
    switch (value.getConverseResponseCase()) {
        case EVENT_TYPE:
            Log.d(TAG, "converse response event: " + value.getEventType());
            break;
        case RESULT:
            final String spokenRequestText = value.getResult().getSpokenRequestText();
            if (value.getResult().getVolumePercentage() != 0) {
                mVolumePercentage = value.getResult().getVolumePercentage();
                Log.i(TAG, "assistant volume changed: " + mVolumePercentage);
                mAudioTrack.setVolume(AudioTrack.getMaxVolume() *
                    mVolumePercentage / 100.0f);
            }
            if (!spokenRequestText.isEmpty()) {
                Log.i(TAG, "assistant request text: " + spokenRequestText);
                mMainHandler.post(() -> mAssistantRequestsAdapter.add(spokenRequestText));
            }
            break;
        case AUDIO_OUT:
            final ByteBuffer audioData =
                    ByteBuffer.wrap(value.getAudioOut().getAudioData().toByteArray());
            Log.d(TAG, "converse audio size: " + audioData.remaining());
            mAssistantResponses.add(audioData);
            if (mLed != null) {
                try {
                    mLed.setValue(!mLed.getValue());
                } catch (IOException e) {
                    Log.w(TAG, "error toggling LED:", e);
                }
            }
            break;
        case ERROR:
            Log.e(TAG, "converse response error: " + value.getError());
            break;
    }
}
Project: androidthings-googleassistant    File: AssistantActivity.java
@Override
public void onNext(ConverseResponse value) {
    switch (value.getConverseResponseCase()) {
        case EVENT_TYPE:
            Log.d(TAG, "converse response event: " + value.getEventType());
            break;
        case RESULT:
            final String spokenRequestText = value.getResult().getSpokenRequestText();
            if (value.getResult().getVolumePercentage() != 0) {
                mVolumePercentage = value.getResult().getVolumePercentage();
                Log.i(TAG, "assistant volume changed: " + mVolumePercentage);
                mAudioTrack.setVolume(AudioTrack.getMaxVolume() *
                    mVolumePercentage / 100.0f);
            }
            mConversationState = value.getResult().getConversationState();
            if (!spokenRequestText.isEmpty()) {
                Log.i(TAG, "assistant request text: " + spokenRequestText);
                mMainHandler.post(() -> mAssistantRequestsAdapter.add(spokenRequestText));
            }
            break;
        case AUDIO_OUT:
            final ByteBuffer audioData =
                    ByteBuffer.wrap(value.getAudioOut().getAudioData().toByteArray());
            Log.d(TAG, "converse audio size: " + audioData.remaining());
            mAssistantResponses.add(audioData);
            if (mLed != null) {
                try {
                    mLed.setValue(!mLed.getValue());
                } catch (IOException e) {
                    Log.w(TAG, "error toggling LED:", e);
                }
            }
            break;
        case ERROR:
            Log.e(TAG, "converse response error: " + value.getError());
            break;
    }
}
Project: dcs-sdk-java    File: AudioTrackPlayerImpl.java
private AudioTrack createAudioTrack(int sampleRate) {
    int encoding = AudioFormat.ENCODING_PCM_16BIT;
    // Get a buffer size that satisfies the minimum requirement
    int minBufferSize = getMinBufferSize(sampleRate, mChannelConfig, encoding);
    Log.d(TAG, "Decoder-AudioTrack-minBufferSize=" + minBufferSize);
    AudioTrack audioTrack =
            new AudioTrack(mStreamType,
                    sampleRate,
                    mChannelConfig,
                    encoding,
                    minBufferSize,
                    AudioTrack.MODE_STREAM);
    audioTrack.play();
    return audioTrack;
}
Project: AssistantBySDK    File: WakeupEngineBase.java
/**
 * Interrupts (stops) wake-up. Recording stops immediately after this call, but a
 * complete interruption is not guaranteed and may lag slightly.
 */
public void stopListening() {
    Log.i(TAG, "stopListening");
    if (isListening()) {
        try {
            stopRecord();
            if (at.getPlayState() != AudioTrack.PLAYSTATE_STOPPED) {
                stopInterrupt.set(true);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Project: AssistantBySDK    File: IflySynthesizer.java
/**
 * Checks whether the speech synthesis engine is currently reading aloud.
 *
 * @return true if it is speaking
 */
public boolean isSpeaking() {
    boolean result = false;
    try {
        result = synthesizer.isSpeaking() || pcmPlayer.getPlayState() == AudioTrack.PLAYSTATE_PLAYING;
    } catch (Exception e) {
        e.printStackTrace();
    }
    Log.i(TAG, "isSpeaking>>" + Boolean.toString(result));
    return result;
}
Project: VideoApplication    File: AudioDecoder.java
@Override
    protected void prepare() throws IOException {
        if (mState < STATE_PREPARED) {
            MediaFormat format;
            if (mState == STATE_UNINITIALIZED) {
                mTrackIndex = selectTrack();
                if (mTrackIndex < 0) {
                    setState(STATE_NO_TRACK_FOUND);
                    return;
                }
                mExtractor.selectTrack(mTrackIndex);
                format = mExtractor.getTrackFormat(mTrackIndex);
                mSampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
                int audioChannels = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
                mAudioTrack = new AudioTrack(
                        AudioManager.STREAM_MUSIC,
                        mSampleRate,
                        (audioChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO),
                        AudioFormat.ENCODING_PCM_16BIT,
                        AudioTrack.getMinBufferSize(
                                mSampleRate,
                                (audioChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO),
                                AudioFormat.ENCODING_PCM_16BIT
                        ),
                        AudioTrack.MODE_STREAM
                );
                mState = STATE_INITIALIZED;
            } else {
                format = mExtractor.getTrackFormat(mTrackIndex);
            }

            String mime = format.getString(MediaFormat.KEY_MIME);
            Log.d(TAG, mime);
            mMediaCodec = MediaCodec.createDecoderByType(mime);
//            mMediaCodec.setCallback(mCallback);
            mMediaCodec.configure(format, null, null, 0);
            setState(STATE_PREPARED);
        }
        super.prepare();
    }
Project: cythara    File: AndroidAudioPlayer.java
/**
 * Constructs a new AndroidAudioPlayer from an audio format, default buffer size and stream type.
 *
 * @param audioFormat The audio format of the stream that this AndroidAudioPlayer will process.
 *                    This can only be 1 channel, PCM 16 bit.
 * @param bufferSizeInSamples  The requested buffer size in samples.
 * @param streamType  The type of audio stream that the internal AudioTrack should use. For
 *                    example, {@link AudioManager#STREAM_MUSIC}.
 * @throws IllegalArgumentException if audioFormat is not valid or if the requested buffer size is invalid.
 * @see AudioTrack
 */
public AndroidAudioPlayer(TarsosDSPAudioFormat audioFormat, int bufferSizeInSamples, int streamType) {
    if (audioFormat.getChannels() != 1) {
        throw new IllegalArgumentException("TarsosDSP only supports mono audio channel count: " + audioFormat.getChannels());
    }

    // The requested sample rate
    int sampleRate = (int) audioFormat.getSampleRate();

    //The buffer size in bytes is twice the buffer size expressed in samples if 16bit samples are used:
    int bufferSizeInBytes = bufferSizeInSamples * audioFormat.getSampleSizeInBits()/8;

    // From the Android API about getMinBufferSize():
    // The total size (in bytes) of the internal buffer where audio data is read from for playback.
    // If track's creation mode is MODE_STREAM, you can write data into this buffer in chunks less than or equal to this size,
    // and it is typical to use chunks of 1/2 of the total size to permit double-buffering. If the track's creation mode is MODE_STATIC,
    // this is the maximum length sample, or audio clip, that can be played by this instance. See getMinBufferSize(int, int, int) to determine
    // the minimum required buffer size for the successful creation of an AudioTrack instance in streaming mode. Using values smaller
    // than getMinBufferSize() will result in an initialization failure.
    int minBufferSizeInBytes = AudioTrack.getMinBufferSize(sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    if (minBufferSizeInBytes > bufferSizeInBytes) {
        throw new IllegalArgumentException("The buffer size should be at least " + (minBufferSizeInBytes / (audioFormat.getSampleSizeInBits() / 8)) + " (samples) according to AudioTrack.getMinBufferSize().");
    }

    // http://developer.android.com/reference/android/media/AudioTrack.html#AudioTrack(int, int, int, int, int, int)
    audioTrack = new AudioTrack(streamType, sampleRate, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes, AudioTrack.MODE_STREAM);

    audioTrack.play();
}
Project: Ftc2018RelicRecovery    File: FtcAndroidTone.java
/**
 * This method is called when the sample at the set marker has been played. This is used to indicate
 * the completion of the tone played.
 *
 * @param track specifies the AudioTrack object that was playing.
 */
@Override
public void onMarkerReached(AudioTrack track)
{
    final String funcName = "onMarkerReached";

    if (debugEnabled)
    {
        dbgTrace.traceEnter(funcName, TrcDbgTrace.TraceLevel.CALLBK);
        dbgTrace.traceExit(funcName, TrcDbgTrace.TraceLevel.CALLBK);
    }

    audioTrack.setNotificationMarkerPosition(0);
    playing = false;
}
Project: letv    File: FFMpegPlayer.java
public static void initAudioTrack(Object mediaplayer_ref, int sampleRateInHz, int channelConfig) throws IOException {
    FFMpegPlayer mp = (FFMpegPlayer) ((WeakReference) mediaplayer_ref).get();
    if (mp != null) {
        // Decompiled source used raw constants; restored names: 3 = STREAM_MUSIC,
        // 2 = ENCODING_PCM_16BIT, 1 = MODE_STREAM.
        int bufferSizeInBytes = AudioTrack.getMinBufferSize(sampleRateInHz, channelConfig,
                AudioFormat.ENCODING_PCM_16BIT);
        try {
            mp.mTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateInHz, channelConfig,
                    AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes, AudioTrack.MODE_STREAM);
        } catch (IllegalStateException e) {
            e.printStackTrace();
        }
        try {
            if (mp.mTrack != null) {
                mp.mTrack.play();
            }
        } catch (IllegalStateException e2) {
            LogTag.e("Error creating uninitialized AudioTrack, re-initial it");
            int tryCount = 0;
            // The decompiled code compared getPlayState() against 0, which it never
            // returns; the intended check is getState() == STATE_UNINITIALIZED (0).
            while (mp.mTrack.getState() == AudioTrack.STATE_UNINITIALIZED && tryCount < 3) {
                if (mp.mTrack != null) {
                    mp.mTrack.stop();
                    mp.mTrack.release();
                    mp.mTrack = null;
                }
                mp.mTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateInHz, channelConfig,
                        AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes, AudioTrack.MODE_STREAM);
                tryCount++;
            }
            if (mp.mTrack != null) {
                mp.mTrack.play();
            }
        }
    }
}
Project: buildAPKsApps    File: ControllerFactory.java
private void createAudioTrack() throws InitializationException {
    // The AudioTrack configurations parameters used here, are guaranteed to
    // be supported on all devices.

    // AudioFormat.CHANNEL_OUT_MONO should be used in place of deprecated
    // AudioFormat.CHANNEL_CONFIGURATION_MONO, but it is not available for
    // API level 3.

    // Output buffer for playing should be as short as possible, so
    // AudioBufferPlayed events are not invoked long before audio buffer is
    // actually played. Also, when AudioTrack is stopped, it is filled with
    // silence of length audioTrackBufferSizeInBytes. If the silence is too
    // long, it causes a delay before the next recorded data starts playing.
    audioTrackBufferSizeInBytes = AudioTrack.getMinBufferSize(
            SpeechTrainerConfig.SAMPLE_RATE_HZ,
            AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    if (audioTrackBufferSizeInBytes <= 0) {
        throw new InitializationException("Failed to initialize playback.");
    }

    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            SpeechTrainerConfig.SAMPLE_RATE_HZ,
            AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
            audioTrackBufferSizeInBytes,
            AudioTrack.MODE_STREAM);
    if (audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
        audioTrack = null;
        throw new InitializationException("Failed to initialize playback.");
    }
}
Project: rtmp-rtsp-stream-client-java    File: DecodersTest.java
public void audioDecoderTest(String filePath) throws IOException {
  AudioDecoder audioDecoderThread = new AudioDecoder(this, this);
  audioDecoderThread.initExtractor(filePath);
  audioDecoderThread.prepareAudio();

  int buffsize = AudioTrack.getMinBufferSize(audioDecoderThread.getSampleRate(),
      AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
  audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, audioDecoderThread.getSampleRate(),
      AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT, buffsize,
      AudioTrack.MODE_STREAM);
  audioTrack.play();
  audioDecoderThread.start();
}
Project: AppRTC-Android    File: WebRtcAudioTrack.java
private boolean startPlayout() {
  Logging.d(TAG, "startPlayout");
  assertTrue(audioTrack != null);
  assertTrue(audioThread == null);
  if (audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
    reportWebRtcAudioTrackStartError("AudioTrack instance is not successfully initialized.");
    return false;
  }
  audioThread = new AudioTrackThread("AudioTrackJavaThread");
  audioThread.start();
  return true;
}