Java 类android.media.AudioFormat 实例源码

项目:GoogleAssistantSDK    文件:SpeechService.java   
@Override
public void onCreate() {
    super.onCreate();
    mHandler = new Handler();
    fetchAccessToken();

    // BUG FIX: the buffer size must be computed for the same channel mask the
    // AudioTrack is created with. The original passed CHANNEL_IN_STEREO (an
    // *input* mask) here while the track below plays CHANNEL_OUT_MONO, so the
    // returned size did not correspond to the actual output configuration.
    int outputBufferSize = AudioTrack.getMinBufferSize(16000,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT);

    try {
        // NOTE(review): AudioManager.USE_DEFAULT_STREAM_TYPE is not one of the
        // STREAM_* constants the AudioTrack constructor documents; confirm it
        // is accepted on all target API levels (STREAM_MUSIC is the usual choice).
        mAudioTrack = new AudioTrack(AudioManager.USE_DEFAULT_STREAM_TYPE, 16000, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT, outputBufferSize, AudioTrack.MODE_STREAM);
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
            mAudioTrack.setVolume(DEFAULT_VOLUME);
        }
        mAudioTrack.play();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
项目:Ftc2018RelicRecovery    文件:FtcAndroidTone.java   
/**
 * This method plays the sound data in the specified buffer.
 *
 * @param buffer specifies the sound data buffer.
 */
public void playSound(short[] buffer)
{
    final String funcName = "playSound";

    if (debugEnabled)
    {
        // NOTE(review): enter/exit are traced back-to-back before any work is
        // done; presumably intentional for this tracing API — confirm.
        dbgTrace.traceEnter(funcName, TrcDbgTrace.TraceLevel.API);
        dbgTrace.traceExit(funcName, TrcDbgTrace.TraceLevel.API);
    }

    // Static (fully pre-loaded) mono 16-bit track sized to the whole buffer.
    audioTrack = new AudioTrack(
            AudioManager.STREAM_MUSIC,
            sampleRate,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            buffer.length*2,    //buffer length in bytes
            AudioTrack.MODE_STATIC);
    audioTrack.write(buffer, 0, buffer.length);
    // Fire the marker callback when playback reaches the end of the clip.
    audioTrack.setNotificationMarkerPosition(buffer.length);
    audioTrack.setPlaybackPositionUpdateListener(this);
    audioTrack.play();
    playing = true;
}
项目:FtcSamples    文件:FtcAndroidTone.java   
/**
 * This method plays the sound data in the specified buffer.
 *
 * @param buffer specifies the sound data buffer.
 */
public void playSound(short[] buffer)
{
    final String funcName = "playSound";

    if (debugEnabled)
    {
        // NOTE(review): enter/exit are traced back-to-back before any work is
        // done; presumably intentional for this tracing API — confirm.
        dbgTrace.traceEnter(funcName, TrcDbgTrace.TraceLevel.API);
        dbgTrace.traceExit(funcName, TrcDbgTrace.TraceLevel.API);
    }

    // Static (fully pre-loaded) mono 16-bit track sized to the whole buffer.
    audioTrack = new AudioTrack(
            AudioManager.STREAM_MUSIC,
            sampleRate,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            buffer.length*2,    //buffer length in bytes
            AudioTrack.MODE_STATIC);
    audioTrack.write(buffer, 0, buffer.length);
    // Fire the marker callback when playback reaches the end of the clip.
    audioTrack.setNotificationMarkerPosition(buffer.length);
    audioTrack.setPlaybackPositionUpdateListener(this);
    audioTrack.play();
    playing = true;
}
项目:AudioGenderIdentifier    文件:MainActivity.java   
@Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        // Minimum record buffer for 16kHz mono 16-bit PCM capture.
        bufferSize = AudioRecord.getMinBufferSize(16000,
                AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT);

        setButtonHandlers();
        enableButtons(false);

        // Load the bundled TensorFlow model from assets for inference.
        inferenceInterface = new TensorFlowInferenceInterface();
        inferenceInterface.initializeTensorFlow(getAssets(), MODEL_FILE);
//        tensorFlowSample();

    }
项目:AndroidSdrRtlTuner    文件:AudioSink.java   
/**
 * Constructor. Will create a new AudioSink.
 *
 * @param packetSize    size of the incoming packets
 * @param sampleRate    sample rate of the audio signal
 */
public AudioSink (int packetSize, int sampleRate) {
    this.packetSize = packetSize;
    this.sampleRate = sampleRate;

    // Create the queues and pre-fill the output queue with empty packets
    // that will be recycled between the producer and this sink.
    this.inputQueue = new ArrayBlockingQueue<SamplePacket>(QUEUE_SIZE);
    this.outputQueue = new ArrayBlockingQueue<SamplePacket>(QUEUE_SIZE);
    for (int i = 0; i < QUEUE_SIZE; i++)
        this.outputQueue.offer(new SamplePacket(packetSize));

    // Create an instance of the AudioTrack class (streaming mono 16-bit PCM):
    int bufferSize = AudioTrack.getMinBufferSize(sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    this.audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, AudioFormat.CHANNEL_OUT_MONO,
                                AudioFormat.ENCODING_PCM_16BIT, bufferSize, AudioTrack.MODE_STREAM);

    // Create the audio filters:
    this.audioFilter1 = FirFilter.createLowPass(2, 1, 1, 0.1f, 0.15f, 30);
    Log.d(LOGTAG,"constructor: created audio filter 1 with " + audioFilter1.getNumberOfTaps() + " Taps.");
    this.audioFilter2 = FirFilter.createLowPass(4, 1, 1, 0.1f, 0.1f, 30);
    Log.d(LOGTAG,"constructor: created audio filter 2 with " + audioFilter2.getNumberOfTaps() + " Taps.");
    this.tmpAudioSamples = new SamplePacket(packetSize);
}
项目:sonicky    文件:Encoder.java   
private void init_(boolean eccEnabled) {
    // Error-correction encoder matching the requested mode.
    mEccEncoder = EccInstanceProvider.getEncoder(eccEnabled);

    // Playback is 44.1kHz mono 16-bit PCM; ask the platform for the
    // smallest legal buffer for that configuration.
    final int bufferBytes = AudioTrack.getMinBufferSize(
            RATE, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);

    mAudioTrack = new AudioTrack(
            AudioManager.STREAM_MUSIC,
            RATE,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferBytes,
            AudioTrack.MODE_STREAM);

    // All writes to the track are serialized on a single worker thread.
    mExecutorService = Executors.newSingleThreadExecutor();
}
项目:dcs-sdk-java    文件:AudioTrackPlayerImpl.java   
/**
 * Creates (or re-creates) the playback AudioTrack for the given stream
 * parameters, caching it while the sample rate stays the same.
 *
 * @param sampleRate PCM sample rate in Hz; values <= 0 fall back to AUDIO_FORMAT_PCM8K
 * @param channels   channel count; values <= 0 fall back to mono
 */
private void initAudioTrack(int sampleRate, int channels) {
    if (sampleRate <= 0) {
        sampleRate = AUDIO_FORMAT_PCM8K;
    }
    if (channels <= 0) {
        channels = 1;
    }
    if (channels == 1) {
        mChannelConfig = AudioFormat.CHANNEL_OUT_MONO;
    } else if (channels == 2) {
        mChannelConfig = AudioFormat.CHANNEL_OUT_STEREO;
    }
    if (iCurrentQueueAudioFormat == sampleRate) {
        // Same rate as before: reuse the existing track if we have one.
        if (mAudioTrack == null) {
            mAudioTrack = createAudioTrack(iCurrentQueueAudioFormat);
        }
    } else {
        Log.d(TAG, "Decoder-initAudioTrack-sampleRate=" + sampleRate);
        Log.d(TAG, "Decoder-initAudioTrack-channels=" + channels);
        // BUG FIX: release the previous AudioTrack before replacing it,
        // otherwise its native resources leak on every rate change.
        if (mAudioTrack != null) {
            mAudioTrack.release();
            mAudioTrack = null;
        }
        mAudioTrack = createAudioTrack(sampleRate);
        iCurrentQueueAudioFormat = sampleRate;
    }
}
项目:dcs-sdk-java    文件:AudioTrackPlayerImpl.java   
/**
 * Returns a playback buffer size that is valid for the given configuration,
 * working around "IllegalArgumentException: Invalid audio buffer size".
 */
private int getMinBufferSize(int sampleRate, int channelConfig, int audioFormat) {
    minBufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig, audioFormat);

    // Derive the channel count from the channel mask constant.
    int channelCount;
    if (channelConfig == AudioFormat.CHANNEL_OUT_DEFAULT
            || channelConfig == AudioFormat.CHANNEL_OUT_MONO
            || channelConfig == AudioFormat.CHANNEL_CONFIGURATION_MONO) {
        channelCount = 1;
    } else if (channelConfig == AudioFormat.CHANNEL_OUT_STEREO
            || channelConfig == AudioFormat.CHANNEL_CONFIGURATION_STEREO) {
        channelCount = 2;
    } else {
        channelCount = Integer.bitCount(channelConfig);
    }

    // If the reported size is not a whole number of frames (or is invalid),
    // fall back to the default of 1152 bytes.
    int frameSizeInBytes = channelCount * (audioFormat == AudioFormat.ENCODING_PCM_8BIT ? 1 : 2);
    if ((minBufferSize % frameSizeInBytes != 0) || (minBufferSize < 1)) {
        minBufferSize = 1152;
    }
    return minBufferSize;
}
项目:AssistantBySDK    文件:PcmPlayer.java   
public PcmPlayer(Context context, Handler handler) {
    this.mContext = context;
    // Streaming mono 16-bit PCM track; sampleRate and wBufferSize are
    // class-level settings.
    this.audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT, wBufferSize, AudioTrack.MODE_STREAM);
    this.handler = handler;
    // Playback-position callbacks are delivered on the supplied handler's thread.
    audioTrack.setPlaybackPositionUpdateListener(this, handler);
    cacheDir = context.getExternalFilesDir(Environment.DIRECTORY_MUSIC);
}
项目:Fatigue-Detection    文件:MediaCodecUtils.java   
@TargetApi(MIN_API_LEVEL_AUDIO)
public static int checkMediaCodecAudioEncoderSupport(){
    // MediaCodec AAC encoding requires at least API 16.
    if(getApiLevel()<MIN_API_LEVEL_AUDIO){
        Log.d(TAG, "checkMediaCodecAudioEncoderSupport: Min API is 16");
        return CODEC_REQ_API_NOT_SATISFIED;
    }
    // Build a representative mono AAC-LC format to probe the encoder with.
    final MediaFormat audioFormat = MediaFormat.createAudioFormat(MIME_TYPE_AUDIO, TEST_SAMPLE_RATE, 1);
    audioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
    audioFormat.setInteger(MediaFormat.KEY_CHANNEL_MASK, AudioFormat.CHANNEL_IN_MONO);
    audioFormat.setInteger(MediaFormat.KEY_BIT_RATE, TEST_AUDIO_BIT_RATE);
    audioFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, 1);
    // Creating, configuring, and cycling the codec start/stop proves support.
    try {
        MediaCodec probe = MediaCodec.createEncoderByType(MIME_TYPE_AUDIO);
        probe.configure(audioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        probe.start();
        probe.stop();
        probe.release();
    } catch (Exception ex) {
        Log.e(TAG, "Failed on creation of codec #", ex);
        return CODEC_ERROR;
    }
    return CODEC_SUPPORTED;
}
项目:Fatigue-Detection    文件:AudioEncoderCore.java   
public AudioEncoderCore(MMediaMuxer MMediaMuxer) throws IOException {
        super(MMediaMuxer);
        // Mono AAC-LC encoder format at the class-level sample rate / bit rate.
        final MediaFormat audioFormat = MediaFormat.createAudioFormat(MIME_TYPE, SAMPLE_RATE, 1);
        audioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
        // NOTE(review): CHANNEL_IN_MONO is an *input* mask; KEY_CHANNEL_MASK
        // normally takes CHANNEL_OUT_* values — confirm this is intended.
        audioFormat.setInteger(MediaFormat.KEY_CHANNEL_MASK, AudioFormat.CHANNEL_IN_MONO);
        audioFormat.setInteger(MediaFormat.KEY_BIT_RATE, BIT_RATE);
        audioFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, 1);
//      audioFormat.setLong(MediaFormat.KEY_MAX_INPUT_SIZE, inputFile.length());
//      audioFormat.setLong(MediaFormat.KEY_DURATION, (long)durationInMs );
        if (VERBOSE) Log.i(TAG, "format: " + audioFormat);
        mEncoder = MediaCodec.createEncoderByType(MIME_TYPE);
        mEncoder.configure(audioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        mEncoder.start();
        // Start the capture thread exactly once; flags track recording state.
        if (mAudioThread == null) {
            mAudioThread = new AudioThread();
            mAudioThread.start();
            capturing=true;
            stopped=false;
        }
    }
项目:LittleBitLouder    文件:TOne.java   
/**
 * Probes candidate sample rates / encodings / channel configurations until
 * an AudioRecord initializes successfully.
 *
 * @return an initialized AudioRecord, or null if no configuration works.
 */
public AudioRecord findAudioRecord() {

        for (int rate : mSampleRates) {
            for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_16BIT }) {
                for (short channelConfig : new short[] { AudioFormat.CHANNEL_IN_MONO }) {
                    try {
                        Log.d("C.TAG", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: "
                                + channelConfig);
                        // BUG FIX: compute the buffer size from the loop
                        // variables instead of hard-coded constants so the
                        // probing actually varies with the candidates.
                        int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);

                        if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                            // BUG FIX: create the recorder with the same rate
                            // the buffer size was computed for. The original
                            // used DEFAULT_RATE here, which made the rate loop
                            // a no-op and could mismatch the buffer size.
                            AudioRecord recorder = new AudioRecord(AudioSource.MIC, rate, channelConfig, audioFormat, bufferSize);

                            if (recorder.getState() == AudioRecord.STATE_INITIALIZED)
                                return recorder;
                        }
                    } catch (Exception e) {
                        Log.e("C.TAG", rate + "Exception, keep trying.", e);
                    }
                }
            }
        }
        // No working configuration found.
        return null;
    }
项目:buildAPKsSamples    文件:SimpleAudioOutput.java   
/**
 * Creates a streaming stereo float AudioTrack for the given frame rate.
 *
 * @param frameRate output sample rate in Hz
 * @return a new AudioTrack in MODE_STREAM
 */
public AudioTrack createAudioTrack(int frameRate) {
    // Minimum buffer the platform requires for this rate/format.
    int minBufferSizeBytes = AudioTrack.getMinBufferSize(frameRate,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    Log.i(TAG, "AudioTrack.minBufferSize = " + minBufferSizeBytes
            + " bytes = " + (minBufferSizeBytes / BYTES_PER_FRAME)
            + " frames");
    // The original computed "8 * minBufferSizeBytes / 8" — a no-op; use the
    // minimum size directly (change the multiplier here if more headroom is
    // ever needed).
    int bufferSize = minBufferSizeBytes;
    int outputBufferSizeFrames = bufferSize / BYTES_PER_FRAME;
    Log.i(TAG, "actual bufferSize = " + bufferSize + " bytes = "
            + outputBufferSizeFrames + " frames");

    // BUG FIX: create the track with the frameRate parameter the buffer size
    // was computed for; the original ignored the parameter and used the
    // mFrameRate field, which may differ from the caller's request.
    AudioTrack player = new AudioTrack(AudioManager.STREAM_MUSIC,
            frameRate, AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_PCM_FLOAT, bufferSize,
            AudioTrack.MODE_STREAM);
    Log.i(TAG, "created AudioTrack");
    return player;
}
项目:buildAPKsSamples    文件:SimpleAudioOutput.java   
/**
 * Creates a streaming stereo float AudioTrack for the given frame rate.
 *
 * @param frameRate output sample rate in Hz
 * @return a new AudioTrack in MODE_STREAM
 */
public AudioTrack createAudioTrack(int frameRate) {
    // Minimum buffer the platform requires for this rate/format.
    int minBufferSizeBytes = AudioTrack.getMinBufferSize(frameRate,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    Log.i(TAG, "AudioTrack.minBufferSize = " + minBufferSizeBytes
            + " bytes = " + (minBufferSizeBytes / BYTES_PER_FRAME)
            + " frames");
    // The original computed "8 * minBufferSizeBytes / 8" — a no-op; use the
    // minimum size directly (change the multiplier here if more headroom is
    // ever needed).
    int bufferSize = minBufferSizeBytes;
    int outputBufferSizeFrames = bufferSize / BYTES_PER_FRAME;
    Log.i(TAG, "actual bufferSize = " + bufferSize + " bytes = "
            + outputBufferSizeFrames + " frames");

    // BUG FIX: create the track with the frameRate parameter the buffer size
    // was computed for; the original ignored the parameter and used the
    // mFrameRate field, which may differ from the caller's request.
    AudioTrack player = new AudioTrack(AudioManager.STREAM_MUSIC,
            frameRate, AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_PCM_FLOAT, bufferSize,
            AudioTrack.MODE_STREAM);
    Log.i(TAG, "created AudioTrack");
    return player;
}
项目:CXJPadProject    文件:ExtAudioRecorder.java   
/**
 * Factory for ExtAudioRecorder.
 *
 * @param recordingCompressed true to use the compressed (MediaRecorder-based)
 *                            path; false for uncompressed AudioRecord capture.
 * @return a recorder instance; for the uncompressed path, the first sample
 *         rate whose recorder reaches State.INITIALIZING wins.
 */
@SuppressWarnings("deprecation")
public static ExtAudioRecorder getInstanse(Boolean recordingCompressed) {
    ExtAudioRecorder result = null;

    if (recordingCompressed) {
        result = new ExtAudioRecorder(false, AudioSource.MIC,
                sampleRates[3], AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT);
    } else {
        // BUG FIX: try each sample rate in turn until one initializes. The
        // original always constructed with sampleRates[3] and combined the
        // conditions with the non-short-circuit '&', so the loop never
        // actually varied the rate.
        int i = 0;
        do {
            result = new ExtAudioRecorder(true, AudioSource.MIC,
                    sampleRates[i], AudioFormat.CHANNEL_CONFIGURATION_MONO,
                    AudioFormat.ENCODING_PCM_16BIT);

        } while ((++i < sampleRates.length)
                && !(result.getState() == ExtAudioRecorder.State.INITIALIZING));
    }
    return result;
}
项目:perfectTune    文件:TuneThread.java   
@Override
public void run() {
    super.run();
    isRunning = true;
    // Smallest buffer the platform accepts for mono 16-bit PCM at rate sr.
    int buffsize = AudioTrack.getMinBufferSize(sr,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    // create an audiotrack object
    AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            sr, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, buffsize,
            AudioTrack.MODE_STREAM);

    short samples[] = new short[buffsize];
    int amp = 10000;                  // peak output amplitude
    double twopi = 8.*Math.atan(1.);  // 2*pi
    double ph = 0.0;                  // running phase in radians

    // start audio
    audioTrack.play();

    // synthesis loop: fill one buffer of sine samples at the current tuning
    // frequency and stream it, until the thread is asked to stop
    while(isRunning){
        double fr = tuneFreq;  // re-read per buffer so frequency changes take effect
        for(int i=0; i < buffsize; i++){
            samples[i] = (short) (amp*Math.sin(ph));
            ph += twopi*fr/sr;
        }
        audioTrack.write(samples, 0, buffsize);
    }
    audioTrack.stop();
    audioTrack.release();
}
项目:phonk    文件:PWave.java   
public PWave(AppRunner appRunner) {
    super(appRunner);
    // Register with the runner so this object is cleaned up on stop.
    appRunner.whatIsRunning.add(this);

    // set the buffer size
    buffsize = AudioTrack.getMinBufferSize(mSampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);

    samples = new short[buffsize];

    // create an audiotrack object
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            mSampleRate, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, buffsize,
            AudioTrack.MODE_STREAM);

    // start audio
    audioTrack.play();
}
项目:EvilsLive    文件:AudioCapture.java   
/**
 * Computes a record buffer size: two timer periods' worth of frames, never
 * smaller than the platform minimum for the configuration.
 */
private int getMinBufferSize(int sampleRate, int channelConfig, int audioFormat) {
        // Channel count and sample width follow from the config constants.
        final int channels = (channelConfig == AudioFormat.CHANNEL_IN_MONO) ? 1 : 2;
        final int bitsPerSample = (AudioFormat.ENCODING_PCM_16BIT == audioFormat) ? 16 : 8;

        // Frames per timer interval: the per-second frame count equals the
        // sample rate (see AudioRecord::getMinFrameCount in
        // android/4.1.1/frameworks/av/media/libmedia/AudioRecord.cpp).
        final int periodInFrames = sampleRate * TIMER_INTERVAL / 1000;

        // Doubled for ping-pong use of the record buffer.
        mMinBufferSize = periodInFrames * 2 * channels * bitsPerSample / 8;

        // Never go below the smallest size the platform allows.
        final int platformMin = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
        if (mMinBufferSize < platformMin) {
            mMinBufferSize = platformMin;
        }

        return mMinBufferSize;
    }
项目:Exoplayer2Radio    文件:AudioTrack.java   
/**
 * Instantiates an {@link android.media.AudioTrack} to be used with tunneling video playback.
 */
@TargetApi(21)
private static android.media.AudioTrack createHwAvSyncAudioTrackV21(int sampleRate,
    int channelConfig, int encoding, int bufferSize, int sessionId) {
  // HW A/V sync attributes: media usage, movie content, AV-sync flag.
  AudioAttributes attributes = new AudioAttributes.Builder()
      .setUsage(AudioAttributes.USAGE_MEDIA)
      .setContentType(AudioAttributes.CONTENT_TYPE_MOVIE)
      .setFlags(AudioAttributes.FLAG_HW_AV_SYNC)
      .build();
  // Stream format description for the track.
  AudioFormat format = new AudioFormat.Builder()
      .setSampleRate(sampleRate)
      .setChannelMask(channelConfig)
      .setEncoding(encoding)
      .build();
  return new android.media.AudioTrack(attributes, format, bufferSize, MODE_STREAM, sessionId);
}
项目:ssj    文件:Microphone.java   
/**
 * Returns the width in bytes of a single sample for the given AudioFormat
 * encoding, or 0 for unknown/invalid encodings.
 */
public static int audioFormatSampleBytes(int f)
{
    if (f == AudioFormat.ENCODING_PCM_8BIT)
    {
        return 1;
    }
    if (f == AudioFormat.ENCODING_PCM_16BIT || f == AudioFormat.ENCODING_DEFAULT)
    {
        return 2;
    }
    if (f == AudioFormat.ENCODING_PCM_FLOAT)
    {
        return 4;
    }
    // ENCODING_INVALID and anything unrecognized.
    return 0;
}
项目:ssj    文件:Microphone.java   
/**
 * Maps an AudioFormat encoding to the corresponding SSJ sample type, or
 * Cons.Type.UNDEF for unknown/invalid encodings.
 */
public static Cons.Type audioFormatSampleType(int f)
{
    if (f == AudioFormat.ENCODING_PCM_8BIT)
    {
        return Cons.Type.CHAR;
    }
    if (f == AudioFormat.ENCODING_PCM_16BIT || f == AudioFormat.ENCODING_DEFAULT)
    {
        return Cons.Type.SHORT;
    }
    if (f == AudioFormat.ENCODING_PCM_FLOAT)
    {
        return Cons.Type.FLOAT;
    }
    // ENCODING_INVALID and anything unrecognized.
    return Cons.Type.UNDEF;
}
项目:libcommon    文件:MediaAudioDecoder.java   
@TargetApi(Build.VERSION_CODES.JELLY_BEAN)
@Override
protected int handlePrepare(MediaExtractor media_extractor) {
    // Locate the first audio track; a negative index means none was found.
    int track_index = selectTrack(media_extractor, "audio/");
    if (track_index >= 0) {
        final MediaFormat format = media_extractor.getTrackFormat(track_index);
        mAudioChannels = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
        mAudioSampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
        // Size the input buffer from the platform's minimum track buffer,
        // capped by the container's declared maximum input size.
        final int min_buf_size = AudioTrack.getMinBufferSize(mAudioSampleRate,
                (mAudioChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO),
                AudioFormat.ENCODING_PCM_16BIT);
        final int max_input_size = format.getInteger(MediaFormat.KEY_MAX_INPUT_SIZE);
        mAudioInputBufSize =  min_buf_size > 0 ? min_buf_size * mAudioChannels * 2 : max_input_size;
        if (mAudioInputBufSize > max_input_size) mAudioInputBufSize = max_input_size;
        // Round down to a whole number of frames (2 bytes per channel).
        final int frameSizeInBytes = mAudioChannels * 2;
        mAudioInputBufSize = (mAudioInputBufSize / frameSizeInBytes) * frameSizeInBytes;
        if (DEBUG) Log.v(TAG, String.format("getMinBufferSize=%d,max_input_size=%d,mAudioInputBufSize=%d",min_buf_size, max_input_size, mAudioInputBufSize));
    }
    return track_index;
}
项目:Android-Audio-Recorder    文件:Sound.java   
/**
 * Builds a playback AudioTrack pre-loaded with the given samples, with an
 * end-of-clip notification marker set.
 */
public AudioTrack generateTrack(int sampleRate, short[] buf, int len) {
    // Marker fires when playback reaches the end of the written samples.
    final int markerPosition = len;

    // Map the recording channel mask onto the matching playback mask.
    int channelMask = 0;
    if (RawSamples.CHANNEL_CONFIG == AudioFormat.CHANNEL_IN_MONO) {
        channelMask = AudioFormat.CHANNEL_OUT_MONO;
    }
    if (RawSamples.CHANNEL_CONFIG == AudioFormat.CHANNEL_IN_STEREO) {
        channelMask = AudioFormat.CHANNEL_OUT_STEREO;
    }

    // MODE_STREAM instead of MODE_STATIC: on some old phones MODE_STATIC
    // never delivers setNotificationMarkerPosition callbacks
    // (http://stackoverflow.com/questions/27602492).
    AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
            channelMask, RawSamples.AUDIO_FORMAT,
            len * (Short.SIZE / 8), AudioTrack.MODE_STREAM);
    track.write(buf, 0, len);
    if (track.setNotificationMarkerPosition(markerPosition) != AudioTrack.SUCCESS)
        throw new RuntimeException("unable to set marker");
    return track;
}
项目:android_packages_apps_tv    文件:AudioCapabilitiesReceiver.java   
@Override
public void onReceive(Context context, Intent intent) {
    String action = intent.getAction();
    // BUG FIX: compare with the constant on the left — Intent.getAction()
    // may return null, and action.equals(...) would then throw a
    // NullPointerException inside the broadcast receiver.
    if (!AudioManager.ACTION_HDMI_AUDIO_PLUG.equals(action)) {
        return;
    }
    // Scan the sink's advertised encodings for AC3 passthrough support.
    boolean supported = false;
    int[] supportedEncodings = intent.getIntArrayExtra(AudioManager.EXTRA_ENCODINGS);
    if (supportedEncodings != null) {
        for (int supportedEncoding : supportedEncodings) {
            if (supportedEncoding == AudioFormat.ENCODING_AC3) {
                supported = true;
                break;
            }
        }
    }
    if (mListener != null) {
        mListener.onAc3PassthroughCapabilityChange(supported);
    }
    // Report capability analytics unless the user opted out.
    if (!mAnalytics.isAppOptOut()) {
        reportAudioCapabilities(supported);
    }
}
项目:UltraPush    文件:AnalyzeActivity.java   
/**
 * Return an array of verified audio sampling rates.
 *
 * @param requested the sampling rates to be verified; each entry is either
 *                  "rate" or "label::rate", and a rate of 0 passes through
 *                  unverified.
 */
private static String[] validateAudioRates(String[] requested) {
    ArrayList<String> validated = new ArrayList<String>();
    for (String s : requested) {
        String[] sv = s.split("::");
        int rate = Integer.parseInt(sv.length == 1 ? sv[0] : sv[1]);
        if (rate == 0) {
            // Zero is a sentinel: keep it without probing the hardware.
            validated.add(s);
        } else if (AudioRecord.getMinBufferSize(rate, AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT) != AudioRecord.ERROR_BAD_VALUE) {
            // The platform accepts this rate for mono 16-bit capture.
            validated.add(s);
        }
    }
    return validated.toArray(new String[0]);
}
项目:BlocklyBot    文件:Tone.java   
/**
 * Builds a static stereo AudioTrack containing a sine tone.
 */
static private AudioTrack generateTone(double freqHz, int durationMs) {
    // Total 16-bit samples for the duration at 44.1kHz stereo, masked down
    // to an even count (one left + one right sample per frame).
    int count = (int) (44100.0 * 2.0 * (durationMs / 1000.0)) & ~1;
    short[] samples = new short[count];
    int size = count * (Short.SIZE / 8);
    Log.d(TAG, freqHz + "Hz for " + durationMs + "ms = " + count + " samples at 44.1Khz 2ch = " + size + " bytes");
    // Duplicate the sine value into both channels of each frame, at 75% of
    // full scale.
    for (int s = 0; s < count; s += 2) {
        short value = (short) (Math.sin(2 * Math.PI * s / (44100.0 / freqHz)) * 0x7FFF * .75);
        samples[s] = value;
        samples[s + 1] = value;
    }
    AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
            size, AudioTrack.MODE_STATIC);
    // Marker at frame count/2 — the last frame of the clip (count samples
    // equals count/2 stereo frames).
    track.setNotificationMarkerPosition(count / 2);
    track.write(samples, 0, count);
    return track;
}
项目:QRDataTransfer-Android    文件:AudioTrackManager.java   
/**
 * Sets the output frequency and (re)starts playback.
 * @param rate desired tone frequency in Hz; values <= 0 are ignored
 */
@SuppressWarnings("deprecation")
public void start(int rate){
    stop();
    if(rate>0){
        Hz=rate;
        // Samples per wave period, and one second's worth of whole periods
        // (waveLen * Hz is approximately RATE after integer division).
        waveLen = RATE / Hz;
        length = waveLen * Hz;
        audioTrack=new AudioTrack(AudioManager.STREAM_MUSIC, RATE,
                AudioFormat.CHANNEL_CONFIGURATION_STEREO, // CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_8BIT, length, AudioTrack.MODE_STREAM);
        // Generate the sine wave table.
        wave=SinWave.sin(wave, waveLen, length);
        if(audioTrack!=null){
            audioTrack.play();
        }
    }else{
        return;
    }

}
项目:Viewer    文件:AudioThread.java   
public AudioThread(int sampleRateInHz, int channel, long streamId, long decoderId, Media media)
{
    // Map the channel count onto the (legacy) channel configuration constants.
    if (channel == 1)
    {
        channel_configuration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
    } else
    {
        channel_configuration = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
    }
    this.mediaStreamId = streamId;
    this.decoderId = decoderId;
    this.media = media;
    // Grow the buffer length if the platform minimum exceeds the default.
    int minBufferSize = AudioTrack.getMinBufferSize(sampleRateInHz, channel_configuration, AudioFormat.ENCODING_PCM_16BIT);
    if (minBufferSize > audioLength)
    {
        audioLength = minBufferSize;
    }
    mAudioBuffer = new byte[audioLength];
    mAudio = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateInHz, channel_configuration, AudioFormat.ENCODING_PCM_16BIT, audioLength, AudioTrack.MODE_STREAM);
}
项目:Android-Guitar-Tuner    文件:AndroidAudioPlayer.java   
public AndroidAudioPlayer(final AudioConfig audioConfig) {
    // Music-stream attributes for media playback.
    AudioAttributes audioAttributes = new AudioAttributes.Builder()
            .setLegacyStreamType(AudioManager.STREAM_MUSIC)
            .setUsage(AudioAttributes.USAGE_MEDIA)
            .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
            .build();

    // Output format (channel mask, encoding, sample rate) from the config.
    AudioFormat audioFormat = new AudioFormat.Builder()
            .setChannelMask(audioConfig.getOutputChannel())
            .setEncoding(audioConfig.getOutputFormat())
            .setSampleRate(audioConfig.getSampleRate())
            .build();

    // MODE_STATIC: the whole clip is written once and replayed from memory.
    audioTrack = new AudioTrack(audioAttributes,
            audioFormat,
            audioConfig.getOutputBufferSize(),
            AudioTrack.MODE_STATIC,
            AudioManager.AUDIO_SESSION_ID_GENERATE);

    outputByteCount = audioConfig.getOutputFormatByteCount();
}
项目:K-Sonic    文件:AudioTrack.java   
/**
 * Instantiates an {@link android.media.AudioTrack} to be used with tunneling video playback.
 */
@TargetApi(21)
private static android.media.AudioTrack createHwAvSyncAudioTrackV21(int sampleRate,
    int channelConfig, int encoding, int bufferSize, int sessionId) {
  // HW A/V sync attributes: media usage, movie content, AV-sync flag.
  AudioAttributes attributes = new AudioAttributes.Builder()
      .setUsage(AudioAttributes.USAGE_MEDIA)
      .setContentType(AudioAttributes.CONTENT_TYPE_MOVIE)
      .setFlags(AudioAttributes.FLAG_HW_AV_SYNC)
      .build();
  // Stream format description for the track.
  AudioFormat format = new AudioFormat.Builder()
      .setSampleRate(sampleRate)
      .setChannelMask(channelConfig)
      .setEncoding(encoding)
      .build();
  return new android.media.AudioTrack(attributes, format, bufferSize, MODE_STREAM, sessionId);
}
项目:K-Sonic    文件:Track.java   
/**
 * (Re)creates the playback AudioTrack and Sonic processor for the given
 * stream parameters, under the instance lock.
 *
 * @param sampleRate  PCM sample rate in Hz
 * @param numChannels channel count (forced to 2 when isJMono is set)
 */
private void initDevice(int sampleRate, int numChannels) {
    if (isJMono)
        numChannels = 2;
    mLock.lock();
    try {
        final int format = findFormatFromChannels(numChannels);
        final int minSize = AudioTrack.getMinBufferSize(sampleRate, format,
                AudioFormat.ENCODING_PCM_16BIT);
        // 4x the minimum buffer gives the time-stretcher some headroom.
        // May throw IllegalArgumentException for unsupported parameters.
        mTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, format,
                AudioFormat.ENCODING_PCM_16BIT, minSize * 4,
                AudioTrack.MODE_STREAM);
        mSonic = new Sonic(sampleRate, numChannels);
    } finally {
        // Removed the pointless 'catch (Exception e) { throw e; }' — any
        // exception still propagates unchanged, and the lock is always
        // released here.
        mLock.unlock();
    }
}
项目:GLES2_AUDIO_VIDEO_RECODE    文件:MediaAudioEncoderRunable.java   
/**
 * Preparation before recording: configures and starts the AAC encoder.
 *
 * @throws IOException if the MediaCodec encoder cannot be created
 */
@Override
public void prepare() throws IOException {

    mTrackIndex = -1;
    mMuxerStarted = mIsEndOfStream = false;

    // MediaFormat configuration: mono AAC-LC at SAMPLE_RATE / BIT_RATE.
    final MediaFormat audioFormat = MediaFormat.createAudioFormat(MIME_TYPE, SAMPLE_RATE, 1);
    audioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
    audioFormat.setInteger(MediaFormat.KEY_CHANNEL_MASK, AudioFormat.CHANNEL_IN_MONO);
    audioFormat.setInteger(MediaFormat.KEY_BIT_RATE, BIT_RATE);
    audioFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, 1);
    // Create and start the encoder.
    mMediaCodec = MediaCodec.createEncoderByType(MIME_TYPE);
    mMediaCodec.configure(audioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
    mMediaCodec.start();

    // Notify the listener; a listener failure must not break preparation.
    if (mMediaEncoderListener != null) {
        try {
            mMediaEncoderListener.onPrepared(this);
        } catch (final Exception e) {
            LogUtils.e(TAG, "prepare:", e);
        }
    }
}
项目:assistance-platform-client-sdk-android    文件:LoudnessSensor.java   
public AudioRecorder(LoudnessSensor sensor) {

            this.mSensor = sensor;

            int channel = AudioFormat.CHANNEL_IN_MONO;
            int mic = AudioSource.MIC;

            // Compute the buffer: six times the platform minimum for mono
            // 16-bit PCM at the common capture frequency.
            int minAudioBuffer = AudioRecord.getMinBufferSize(
                    COMMON_AUDIO_FREQUENCY,
                    channel,
                    AudioFormat.ENCODING_PCM_16BIT);
            int audioBuffer = minAudioBuffer * 6;

            // Create the recorder.
            audioInput = new AudioRecord(
                    mic,
                    COMMON_AUDIO_FREQUENCY,
                    channel,
                    AudioFormat.ENCODING_PCM_16BIT,
                    audioBuffer);
        }
项目:ChristmasVoice    文件:MediaToolsProvider.java   
/**
 * Builds a streaming playback track routed through the preset reverb effect.
 */
AudioTrack getAudioTrack(long bufferSize) {
    // Mono output format from the shared helper.
    AudioFormat audioFormat = getAudioFormat(AudioFormat.CHANNEL_OUT_MONO);
    AudioAttributes attributes = new AudioAttributes.Builder()
            .setUsage(AudioAttributes.USAGE_MEDIA)
            .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
            .build();
    AudioTrack track = new AudioTrack.Builder()
            .setAudioAttributes(attributes)
            .setAudioFormat(audioFormat)
            .setTransferMode(AudioTrack.MODE_STREAM)
            .setBufferSizeInBytes((int) bufferSize)
            .build();
    // Attach the reverb aux effect at full send level.
    PresetReverb reverb = getReverb();
    track.attachAuxEffect(reverb.getId());
    track.setAuxEffectSendLevel(1.0f);
    return track;
}
项目:EntboostIM    文件:ExtAudioRecorder.java   
/**
 * Factory for ExtAudioRecorder.
 *
 * @param recordingCompressed true for the compressed (MediaRecorder-based)
 *                            path; false tries sample rates from highest to
 *                            lowest until one initializes.
 * @param callback            receives recording events
 */
public static ExtAudioRecorder getInstance(Boolean recordingCompressed, VoiceCallback callback) {
    if (recordingCompressed) {
        result = new ExtAudioRecorder(false, AudioSource.MIC,
                sampleRates[3], AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT, callback);
    } else {
        // FIX: derive the starting index from the array length instead of the
        // hard-coded 3, so the fallback loop still covers every rate if the
        // sampleRates table ever changes size.
        int i = sampleRates.length - 1;
        do {
            result = new ExtAudioRecorder(true, AudioSource.MIC,
                    sampleRates[i], AudioFormat.CHANNEL_CONFIGURATION_MONO,
                    AudioFormat.ENCODING_PCM_16BIT, callback);

        } while ((--i >= 0)
                && !(result.getState() == ExtAudioRecorder.State.INITIALIZING));
    }
    return result;
}
项目:dtlive_android    文件:AudioCapture.java   
public void start() {
    // Target roughly one second of audio; never below the platform minimum.
    int minBufferSize = AudioRecord.getMinBufferSize(mSampleRate, AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT);
    int targetSize = mSampleRate * mChannels;      // 1 seconds buffer size
    if (targetSize < minBufferSize) {
        targetSize = minBufferSize;
    }
    if (audioCapture == null) {
        try {
            audioCapture = new AudioRecord(MediaRecorder.AudioSource.MIC,
                    mSampleRate,
                    AudioFormat.CHANNEL_IN_STEREO,
                    AudioFormat.ENCODING_PCM_16BIT,
                    targetSize);
        } catch (IllegalArgumentException    e) {
            // Invalid parameters: leave audioCapture null so recording is skipped.
            audioCapture = null;
        }
    }

    // Initialize the native audio pipeline before capture starts.
    LiveJniLib.native_audio_init(mSampleRate, mChannels);

    if ( audioCapture != null) {
        audioCapture.startRecording();
        AudioEncoder audioEncoder = new AudioEncoder();
        audioEncoder.start();
    }
}
项目:Newton_for_Android_AS    文件:RecordWithAudioRecord.java   
/**
 * Initializes the recorder and opens the destination file.
 *
 * @param audioSource audio source, see MediaRecorder.AudioSource
 * @param desFile destination file the raw PCM data is written to
 * @throws IOException if the destination file cannot be opened
 */
@Override
public void init(int audioSource, File desFile) throws IOException {
    File dir = desFile.getParentFile();
    if (!dir.exists()) {
        dir.mkdirs();
    }

    isRecording = new AtomicBoolean(false);

    // 16kHz mono 16-bit PCM capture configuration.
    // NOTE(review): CHANNEL_CONFIGURATION_MONO is deprecated; CHANNEL_IN_MONO
    // is the modern equivalent — confirm before switching constants.
    int sampleRateInHz = 16000;
    int channelConfig = AudioFormat.CHANNEL_CONFIGURATION_MONO;
    int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    bufferSizeInBytes = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    audioRecord = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, bufferSizeInBytes);

    // Buffered output stream for the recorded samples.
    dos = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(desFile)));
}
项目:dcs-sdk-java    文件:AudioTrackPlayerImpl.java   
private AudioTrack createAudioTrack(int sampleRate) {
    int encoding = AudioFormat.ENCODING_PCM_16BIT;
    // Get a buffer size that satisfies the minimum requirement.
    int minBufferSize = getMinBufferSize(sampleRate, mChannelConfig, encoding);
    Log.d(TAG, "Decoder-AudioTrack-minBufferSize=" + minBufferSize);
    AudioTrack audioTrack =
            new AudioTrack(mStreamType,
                    sampleRate,
                    mChannelConfig,
                    encoding,
                    minBufferSize,
                    AudioTrack.MODE_STREAM);
    // Start playback immediately; callers stream PCM data into the track.
    audioTrack.play();
    return audioTrack;
}
项目:dcs-sdk-java    文件:AudioRecordThread.java   
public AudioRecordThread(LinkedBlockingDeque<byte[]> linkedBlockingDeque) {
    // Captured PCM chunks are handed off through this shared deque.
    this.linkedBlockingDeque = linkedBlockingDeque;
    bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE_HZ, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    // Mic capture at SAMPLE_RATE_HZ, mono 16-bit PCM.
    audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, SAMPLE_RATE_HZ, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize);
}
项目:libRtmp    文件:AndroidUntil.java   
/**
 * Returns the platform's minimum record buffer size for the configured
 * frequency, encoding, and channel count.
 */
public static int getRecordBufferSize() {
    int frequency = Options.getInstance().audio.frequency;
    int audioEncoding = Options.getInstance().audio.encoding;
    // Legacy channel constants, kept for compatibility with AudioRecord.
    int channelConfiguration = (Options.getInstance().audio.channelCount == 2)
            ? AudioFormat.CHANNEL_CONFIGURATION_STEREO
            : AudioFormat.CHANNEL_CONFIGURATION_MONO;
    return AudioRecord.getMinBufferSize(frequency, channelConfiguration, audioEncoding);
}