Java 类javax.sound.sampled.AudioFormat 实例源码

项目:jdk8u-jdk    文件:PCMtoPCMCodec.java   
/**
 * Obtains the set of target formats with the given encoding that this codec
 * can convert the given source format to.
 *
 * @param targetEncoding the desired target encoding
 * @param sourceFormat the format to convert from
 * @return the matching target formats; empty when the conversion is
 *         unsupported
 */
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat){

    // filter out targetEncoding from the old getOutputFormats( sourceFormat ) method
    AudioFormat[] formats = getOutputFormats( sourceFormat );
    // typed Vector removes the raw type and the unchecked casts below
    Vector<AudioFormat> newFormats = new Vector<>();
    for (AudioFormat format : formats) {
        if (format.getEncoding().equals(targetEncoding)) {
            newFormats.addElement(format);
        }
    }
    // toArray replaces the manual element-by-element copy loop
    return newFormats.toArray(new AudioFormat[0]);
}
项目:jdk8u-jdk    文件:AiffFileWriter.java   
/**
 * Returns the AIFF file types this writer supports for the given stream.
 * Only linear PCM and a-law/u-law encodings can be written; any other
 * encoding yields an empty array.
 */
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {

        // defensive copy so callers cannot mutate the internal table
        AudioFileFormat.Type[] supported = new AudioFileFormat.Type[types.length];
        System.arraycopy(types, 0, supported, 0, types.length);

        // make sure we can write this stream
        AudioFormat.Encoding encoding = stream.getFormat().getEncoding();
        boolean writable = AudioFormat.Encoding.ALAW.equals(encoding)
                || AudioFormat.Encoding.ULAW.equals(encoding)
                || AudioFormat.Encoding.PCM_SIGNED.equals(encoding)
                || AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding);

        return writable ? supported : new AudioFileFormat.Type[0];
    }
项目:romanov    文件:FloatSampleBuffer.java   
/**
 * Resets this buffer with the audio data specified in the arguments. This
 * FloatSampleBuffer's sample count will be set to
 * <code>byteCount / format.getFrameSize()</code>.
 *
 * @param buffer the byte array containing interleaved audio data
 * @param offset the start offset in <code>buffer</code>, in bytes
 * @param byteCount the number of bytes to read from <code>buffer</code>
 * @param format the audio format of the bytes in <code>buffer</code>
 * @param lazy if true, then existing channels will be tried to be re-used
 *            to minimize garbage collection.
 * @throws IllegalArgumentException if the buffer is too small to hold
 *             <code>offset + byteCount</code> bytes
 */
public void initFromByteArray(byte[] buffer, int offset, int byteCount,
        AudioFormat format, boolean lazy) {
    if (offset + byteCount > buffer.length) {
        throw new IllegalArgumentException(
                "FloatSampleBuffer.initFromByteArray: buffer too small.");
    }

    // one frame holds one sample for every channel
    int thisSampleCount = byteCount / format.getFrameSize();
    init(format.getChannels(), thisSampleCount, format.getSampleRate(),
            lazy);

    // save format for automatic dithering mode
    originalFormatType = FloatSampleTools.getFormatType(format);

    // convert the interleaved bytes into the per-channel float arrays
    FloatSampleTools.byte2float(buffer, offset, channels, 0, sampleCount,
            format);
}
项目:romanov    文件:FloatSampleBuffer.java   
/**
 * Write the contents of the byte array to this buffer, overwriting existing
 * data. If the byte array has fewer channels than this float buffer, only
 * the first channels are written. Vice versa, if the byte buffer has more
 * channels than this float buffer, only the first channels of the byte
 * buffer are written to this buffer.
 * <p>
 * The format and the number of samples of this float buffer are not
 * changed, so if the byte array has more samples than fit into this float
 * buffer, it is not expanded.
 *
 * @param buffer the byte buffer to write to this float buffer
 * @param srcByteOffset the offset in bytes in buffer where to start reading
 * @param format the audio format of the bytes in buffer
 * @param dstSampleOffset the offset in samples where to start writing the
 *            converted float data into this float buffer
 * @param aSampleCount the number of samples to write
 * @return the number of samples actually written (never negative)
 */
public int writeByteBuffer(byte[] buffer, int srcByteOffset,
        AudioFormat format, int dstSampleOffset, int aSampleCount) {
    // clamp so we never write past the end of this buffer
    if (dstSampleOffset + aSampleCount > getSampleCount()) {
        aSampleCount = getSampleCount() - dstSampleOffset;
    }
    // guard against a negative count when dstSampleOffset is out of range;
    // the original code could return a negative "samples written" value here
    if (aSampleCount < 0) {
        return 0;
    }
    // convert only the channels present in both the source bytes and this
    // buffer (the original performed this min() with two redundant ifs,
    // the second of which could never trigger after the first)
    int lChannels = Math.min(format.getChannels(), getChannelCount());
    for (int channel = 0; channel < lChannels; channel++) {
        float[] data = getChannel(channel);

        FloatSampleTools.byte2floatGeneric(buffer, srcByteOffset,
                format.getFrameSize(), data, dstSampleOffset, aSampleCount,
                format);
        // advance by the size in bytes of one sample of one channel
        srcByteOffset += format.getFrameSize() / format.getChannels();
    }
    return aSampleCount;
}
项目:openjdk-jdk10    文件:AudioFloatFormatConverter.java   
@Override
public AudioInputStream getAudioInputStream(Encoding targetEncoding,
                                            AudioInputStream sourceStream) {
    // Reject unsupported conversions up front.
    AudioFormat format = sourceStream.getFormat();
    if (!isConversionSupported(targetEncoding, format)) {
        throw new IllegalArgumentException(
                "Unsupported conversion: " + format.toString()
                        + " to " + targetEncoding.toString());
    }
    // Nothing to do when the stream already carries the target encoding.
    if (format.getEncoding().equals(targetEncoding)) {
        return sourceStream;
    }
    // PCM_FLOAT is always 32-bit; otherwise keep the source sample size.
    int bits = targetEncoding.equals(Encoding.PCM_FLOAT)
            ? 32 : format.getSampleSizeInBits();
    int channels = format.getChannels();
    float samplerate = format.getSampleRate();
    AudioFormat targetFormat = new AudioFormat(targetEncoding, samplerate,
            bits, channels, channels * bits / 8, samplerate,
            format.isBigEndian());
    return getAudioInputStream(targetFormat, sourceStream);
}
项目:jdk8u-jdk    文件:WaveFileWriter.java   
/**
 * Returns the WAVE file types this writer supports for the given stream:
 * the full list for linear PCM and a-law/u-law encodings, otherwise empty.
 */
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {

        // copy of the supported-type table, never handed out by reference
        AudioFileFormat.Type[] result = new AudioFileFormat.Type[types.length];
        System.arraycopy(types, 0, result, 0, types.length);

        // make sure we can write this stream
        AudioFormat.Encoding enc = stream.getFormat().getEncoding();
        if (!AudioFormat.Encoding.ALAW.equals(enc)
                && !AudioFormat.Encoding.ULAW.equals(enc)
                && !AudioFormat.Encoding.PCM_SIGNED.equals(enc)
                && !AudioFormat.Encoding.PCM_UNSIGNED.equals(enc)) {
            return new AudioFileFormat.Type[0];
        }
        return result;
    }
项目:OpenJSharp    文件:PCMtoPCMCodec.java   
/**
 * Converts the source stream to the requested PCM encoding, keeping every
 * other format parameter (rate, size, channels, frame layout, endianness).
 *
 * @throws IllegalArgumentException if the conversion is not supported
 */
public AudioInputStream getAudioInputStream(AudioFormat.Encoding targetEncoding, AudioInputStream sourceStream) {

    if( !isConversionSupported(targetEncoding, sourceStream.getFormat()) ) {
        throw new IllegalArgumentException("Unsupported conversion: " + sourceStream.getFormat().toString() + " to " + targetEncoding.toString() );
    }
    AudioFormat sourceFormat = sourceStream.getFormat();
    // same PCM parameters as the source; only the encoding changes
    AudioFormat targetFormat = new AudioFormat(targetEncoding,
            sourceFormat.getSampleRate(),
            sourceFormat.getSampleSizeInBits(),
            sourceFormat.getChannels(),
            sourceFormat.getFrameSize(),
            sourceFormat.getFrameRate(),
            sourceFormat.isBigEndian());
    return getAudioInputStream(targetFormat, sourceStream);
}
项目:jlayer    文件:JavaSoundAudioDevice.java   
/**
 * Runs a short test by playing a short silent sound.
 */
public void test()
    throws JavaLayerException
{
    try
    {
        // mono, 16-bit, little-endian, 22.05 kHz
        open(new AudioFormat(22050, 16, 1, true, false));
        // one tenth of a second of silence
        short[] silence = new short[22050 / 10];
        write(silence, 0, silence.length);
        flush();
        close();
    }
    catch (RuntimeException ex)
    {
        throw new JavaLayerException("Device test failed: "+ex);
    }
}
项目:romanov    文件:JSBufferedSampleRecorder.java   
/**
 * Constructs a JSBufferedSampleRecorder that expects audio in the given AudioFormat and 
 * which will save to a file with given name.
 * 
 * @param sys the JSMinim system used for output
 * @param fileName the name of the file to save to (not including the extension)
 * @param fileType the type of file to write
 * @param fileFormat the AudioFormat you want to record in
 * @param bufferSize the size of a single recording buffer
 */
JSBufferedSampleRecorder(JSMinim sys,
                         String fileName, 
                         AudioFileFormat.Type fileType, 
                         AudioFormat fileFormat,
                         int bufferSize)
{
  system = sys;
  name = fileName;
  type = fileType;
  format = fileFormat;
  buffers = new ArrayList<FloatBuffer>(20);
  left = FloatBuffer.allocate(bufferSize * 10);
  // a right-channel buffer is only needed for stereo recordings
  right = (format.getChannels() == Minim.STEREO)
          ? FloatBuffer.allocate(bufferSize * 10)
          : null;
}
项目:openjdk-jdk10    文件:AiffSampleRate.java   
// Round-trips a one-sample AIFF file at the given sample rate and verifies
// that the rate read back from the file header matches; returns false on a
// mismatch. Exceptions are reported but do not fail the test.
private static boolean testSampleRate(float sampleRate) {
    boolean result = true;

    try {
        // create AudioInputStream with the given sample rate
        ByteArrayInputStream data = new ByteArrayInputStream(new byte[1]);
        AudioFormat format = new AudioFormat(sampleRate, 8, 1, true, true);
        AudioInputStream stream = new AudioInputStream(data, format, 1);

        // write to AIFF file
        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        AudioSystem.write(stream, AudioFileFormat.Type.AIFF, outputStream);
        byte[] fileData = outputStream.toByteArray();
        InputStream inputStream = new ByteArrayInputStream(fileData);
        AudioFileFormat aff = AudioSystem.getAudioFileFormat(inputStream);
        // AIFF stores the rate as the frame rate; compare against it
        if (! equals(sampleRate, aff.getFormat().getFrameRate())) {
            out("error for sample rate " + sampleRate);
            result = false;
        }
    } catch (Exception e) {
        // unexpected exceptions are logged but deliberately do not set
        // result to false
        out(e);
        out("Test NOT FAILED");
    }
    return result;
}
项目:rcom    文件:JitterExample.java   
/**
 * Plays the raw samples from remote.sw in a loop: a background FeedThread
 * pushes the data through a JitterResampler while this thread copies the
 * resampled output to the default mixer. Runs until the process is killed.
 */
public static void main(String[] args) throws Exception {
    AbstractRcomArgs a=new AbstractRcomArgs();
    UtilCli.parse(a, args, true);
    File folder=new File("/home/rizsi/tmp/video");
    byte[] data=UtilFile.loadFile(new File(folder, "remote.sw"));
    AudioFormat format=ManualTestEchoCancel.getFormat();
    final Mixer mixer = AudioSystem.getMixer(null);
    DataLine.Info info2= new DataLine.Info(SourceDataLine.class, format);
    SourceDataLine s=(SourceDataLine) mixer.getLine(info2);
    s.open(format, framesamples*2);
    s.start();
    try(LoopInputStream lis=new LoopInputStream(data))
    {
        try(JitterResampler rs=new JitterResampler(a, 8000, framesamples, 2))
        {
            new FeedThread(lis, rs).start();
            // fixed: removed stray empty statement (";;") after initializer
            final byte[] buffer=new byte[framesamples*2];
            while(true)
            {
                rs.readOutput(buffer);
                s.write(buffer, 0, buffer.length);
            }
        }
    }
}
项目:openjdk-jdk10    文件:PCMtoPCMCodec.java   
@Override
public AudioInputStream getAudioInputStream(AudioFormat.Encoding targetEncoding, AudioInputStream sourceStream) {

    // fail fast when the requested encoding cannot be produced
    if( !isConversionSupported(targetEncoding, sourceStream.getFormat()) ) {
        throw new IllegalArgumentException("Unsupported conversion: " + sourceStream.getFormat().toString() + " to " + targetEncoding.toString() );
    }
    AudioFormat src = sourceStream.getFormat();
    // keep every PCM parameter of the source, change only the encoding
    AudioFormat targetFormat = new AudioFormat(targetEncoding,
            src.getSampleRate(),
            src.getSampleSizeInBits(),
            src.getChannels(),
            src.getFrameSize(),
            src.getFrameRate(),
            src.isBigEndian());
    return getConvertedStream(targetFormat, sourceStream);
}
项目:rcom    文件:Replay.java   
// Mixes the remote and local raw recordings, saves the sum as rawmic.sw,
// then plays remote, local and the mixed data one after another by swapping
// the sample buffer each time the current one finishes.
public static void main(String[] args) throws IOException, LineUnavailableException {
    File folder=new File("/home/rizsi/tmp/video");
    byte[] data=UtilFile.loadFile(new File(folder, "remote.sw"));
    byte[] data2=UtilFile.loadFile(new File(folder, "local.sw"));
    System.out.println("remote.sw max: "+measureMax(data));
    System.out.println("local.sw max: "+measureMax(data2));
    // element-wise sum of the two recordings
    byte[] data3=sum(data, data2);
    UtilFile.saveAsFile(new File(folder, "rawmic.sw"), data3);
    AudioFormat format=ManualTestEchoCancel.getFormat();
    final Mixer mixer = AudioSystem.getMixer(null);
    Play p=new Play(mixer, format, ManualTestEchoCancel.frameSamples)
    {
        @Override
        protected void switchBuffer() {
            // advance remote -> local -> mixed; stays on mixed afterwards
            if(getSample()==data)
            {
                setSample(data2);
            }else if(getSample()==data2)
            {
                setSample(data3);
            }
        }
    };
    p.start();
    p.setSample(data);
}
项目:BrainControl    文件:Microphone.java   
/**
 * Reads the microphone configuration from the property sheet and builds the
 * desired capture AudioFormat from the sample rate, sample size, channel
 * count, endianness and signedness properties.
 *
 * @param ps the property sheet holding the configured values
 * @throws PropertyException if a property cannot be read
 */
@Override
public void newProperties(PropertySheet ps) throws PropertyException {
    super.newProperties(ps);
    logger = ps.getLogger();

    sampleRate = ps.getInt(PROP_SAMPLE_RATE);

    int sampleSizeInBits = ps.getInt(PROP_BITS_PER_SAMPLE);

    int channels = ps.getInt(PROP_CHANNELS);
    bigEndian = ps.getBoolean(PROP_BIG_ENDIAN);
    signed = ps.getBoolean(PROP_SIGNED);

    // the format the recorder will request from the capture line
    desiredFormat = new AudioFormat((float) sampleRate, sampleSizeInBits, channels, signed, bigEndian);

    closeBetweenUtterances = ps.getBoolean(PROP_CLOSE_BETWEEN_UTTERANCES);
    msecPerRead = ps.getInt(PROP_MSEC_PER_READ);
    keepDataReference = ps.getBoolean(PROP_KEEP_LAST_AUDIO);
    stereoToMono = ps.getString(PROP_STEREO_TO_MONO);
    selectedChannel = ps.getInt(PROP_SELECT_CHANNEL);
    selectedMixerIndex = ps.getString(PROP_SELECT_MIXER);
    audioBufferSize = ps.getInt(PROP_BUFFER_SIZE);
}
项目:OpenJSharp    文件:AudioFloatFormatConverter.java   
/**
 * Converts a float stream to the target format, inserting a channel mixer
 * and/or a resampler in front of the final format-converter stage as needed.
 *
 * @throws IllegalArgumentException if the conversion is not supported
 */
public AudioInputStream getAudioInputStream(AudioFormat targetFormat,
        AudioFloatInputStream sourceStream) {

    if (!isConversionSupported(targetFormat, sourceStream.getFormat())) {
        throw new IllegalArgumentException("Unsupported conversion: "
                + sourceStream.getFormat().toString() + " to "
                + targetFormat.toString());
    }
    // adapt the channel count first ...
    if (targetFormat.getChannels() != sourceStream.getFormat().getChannels()) {
        sourceStream = new AudioFloatInputStreamChannelMixer(sourceStream,
                targetFormat.getChannels());
    }
    // ... then the sample rate, if it differs more than rounding noise
    float rateDiff = Math.abs(targetFormat.getSampleRate()
            - sourceStream.getFormat().getSampleRate());
    if (rateDiff > 0.000001) {
        sourceStream = new AudioFloatInputStreamResampler(sourceStream,
                targetFormat);
    }
    return new AudioInputStream(
            new AudioFloatFormatConverterInputStream(targetFormat, sourceStream),
            targetFormat, sourceStream.getFrameLength());
}
项目:openjdk-jdk10    文件:PCMtoPCMCodec.java   
/**
 * Obtains the set of target formats with the given encoding that this codec
 * can convert the given source format to.
 *
 * @param targetEncoding the desired target encoding, non-null
 * @param sourceFormat the format to convert from
 * @return the matching target formats; empty when unsupported
 */
@Override
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat){
    Objects.requireNonNull(targetEncoding);

    // filter out targetEncoding from the old getOutputFormats( sourceFormat ) method
    AudioFormat[] formats = getOutputFormats( sourceFormat );
    Vector<AudioFormat> newFormats = new Vector<>();
    for (AudioFormat format : formats) {
        if (format.getEncoding().equals(targetEncoding)) {
            newFormats.addElement(format);
        }
    }
    // Vector.toArray replaces the manual element-by-element copy loop
    return newFormats.toArray(new AudioFormat[0]);
}
项目:OpenJSharp    文件:WaveFloatFileWriter.java   
/**
 * Writes the stream as an IEEE-float WAVE body: a "fmt " chunk describing
 * the format followed by a "data" chunk holding the raw sample bytes.
 *
 * @param stream the audio data to write; the fmt tag is hard-coded to
 *               WAVE_FORMAT_IEEE_FLOAT, so the stream is assumed to carry
 *               float PCM
 * @param writer the RIFF container the chunks are written into
 * @throws IOException if writing to the underlying output fails
 */
public void write(AudioInputStream stream, RIFFWriter writer)
        throws IOException {

    RIFFWriter fmt_chunk = writer.writeChunk("fmt ");

    AudioFormat format = stream.getFormat();
    fmt_chunk.writeUnsignedShort(3); // WAVE_FORMAT_IEEE_FLOAT
    fmt_chunk.writeUnsignedShort(format.getChannels());
    fmt_chunk.writeUnsignedInt((int) format.getSampleRate());
    // average bytes per second = frame rate * frame size
    fmt_chunk.writeUnsignedInt(((int) format.getFrameRate())
            * format.getFrameSize());
    fmt_chunk.writeUnsignedShort(format.getFrameSize());
    fmt_chunk.writeUnsignedShort(format.getSampleSizeInBits());
    fmt_chunk.close();
    RIFFWriter data_chunk = writer.writeChunk("data");
    byte[] buff = new byte[1024];
    int len;
    // copy the audio bytes into the data chunk until end of stream
    while ((len = stream.read(buff, 0, buff.length)) != -1)
        data_chunk.write(buff, 0, len);
    data_chunk.close();
}
项目:openjdk-jdk10    文件:NoteOverFlowTest.java   
// Regression test: after saturating the synthesizer's polyphony and sending
// one more note-on/note-off, all voices must eventually become inactive.
public static void main(String[] args) throws Exception
{
    AudioSynthesizer synth = new SoftSynthesizer();
    AudioFormat format = new AudioFormat(44100, 16, 2, true, false);
    AudioInputStream stream = synth.openStream(format, null);

    // Make all voices busy, e.g.
    // send midi on and midi off on all available voices
    MidiChannel ch1 = synth.getChannels()[0];
    ch1.programChange(48); // Use continuous instrument like string ensemble
    for (int i = 0; i < synth.getMaxPolyphony(); i++) {
        ch1.noteOn(64, 64);
        ch1.noteOff(64);
    }

    // Now send single midi on, and midi off message
    ch1.noteOn(64, 64);
    ch1.noteOff(64);

    // Read 20 sec from stream, by this time all voices should be inactive
    // (the original comment said 10 sec, but the code skips frameRate * 20)
    stream.skip(format.getFrameSize() * ((int)(format.getFrameRate() * 20)));

    // If no voices are active, then this test will pass
    VoiceStatus[] v = synth.getVoiceStatus();
    for (int i = 0; i < v.length; i++) {
        if(v[i].active)
        {
            throw new RuntimeException("Not all voices are inactive!");
        }
    }

    // Close the synthesizer after use
    synth.close();
}
项目:OpenJSharp    文件:SoftMixingDataLine.java   
/**
 * Wraps a float stream in a resampler. The target format keeps everything
 * from the source except the sample/frame rate, which is taken from the
 * requested format. The interpolation algorithm can be selected through the
 * "interpolation" property of the requested format.
 */
public AudioFloatInputStreamResampler(AudioFloatInputStream ais,
        AudioFormat format) {
    this.ais = ais;
    AudioFormat sourceFormat = ais.getFormat();
    targetFormat = new AudioFormat(sourceFormat.getEncoding(),
            format.getSampleRate(), sourceFormat.getSampleSizeInBits(),
            sourceFormat.getChannels(), sourceFormat.getFrameSize(),
            format.getSampleRate(), sourceFormat.isBigEndian());
    nrofchannels = targetFormat.getChannels();
    Object interpolation = format.getProperty("interpolation");
    // instanceof is false for null, so the explicit null check was redundant
    if (interpolation instanceof String) {
        String resamplerType = (String) interpolation;
        if (resamplerType.equalsIgnoreCase("point")) {
            this.resampler = new SoftPointResampler();
        } else if (resamplerType.equalsIgnoreCase("linear")) {
            // note: "linear" and "linear2" select the same implementation
            this.resampler = new SoftLinearResampler2();
        } else if (resamplerType.equalsIgnoreCase("linear1")) {
            this.resampler = new SoftLinearResampler();
        } else if (resamplerType.equalsIgnoreCase("linear2")) {
            this.resampler = new SoftLinearResampler2();
        } else if (resamplerType.equalsIgnoreCase("cubic")) {
            this.resampler = new SoftCubicResampler();
        } else if (resamplerType.equalsIgnoreCase("lanczos")) {
            this.resampler = new SoftLanczosResampler();
        } else if (resamplerType.equalsIgnoreCase("sinc")) {
            this.resampler = new SoftSincResampler();
        }
    }
    if (resampler == null) {
        resampler = new SoftLinearResampler2(); // default interpolation
    }
    // pitch ratio between source and requested rate
    pitch[0] = sourceFormat.getSampleRate() / format.getSampleRate();
    pad = resampler.getPadding();
    pad2 = pad * 2;
    ibuffer = new float[nrofchannels][buffer_len + pad2];
    ibuffer2 = new float[nrofchannels * buffer_len];
    ibuffer_index = buffer_len + pad;
    ibuffer_len = buffer_len;
}
项目:openjdk-jdk10    文件:RecognizeHugeWaveFiles.java   
/**
 * Tests the {@code AudioFileFormat} fetched from the fake header.
 * <p>
 * Note that the frameLength and byteLength are stored as int which means
 * that {@code AudioFileFormat} will store the data above {@code MAX_INT} as
 * NOT_SPECIFIED.
 *
 * @param type the chunk-type bytes placed in the fake header
 * @param rate the sample rate encoded in the header
 * @param channel the channel count encoded in the header
 * @param size the (possibly huge) data size encoded in the header
 * @throws Exception if the header cannot be parsed or a check fails
 */
private static void testAFF(final byte[] type, final int rate,
                            final int channel, final long size)
        throws Exception {
    final byte[] header = createHeader(type, rate, channel, size);
    final ByteArrayInputStream fake = new ByteArrayInputStream(header);
    final AudioFileFormat aff = AudioSystem.getAudioFileFormat(fake);
    final AudioFormat format = aff.getFormat();

    if (aff.getType() != AudioFileFormat.Type.WAVE) {
        throw new RuntimeException("Error");
    }

    final long frameLength = size / format.getFrameSize();
    if (frameLength <= Integer.MAX_VALUE) {
        // fits the int header field: must be reported exactly
        if (aff.getFrameLength() != frameLength) {
            System.err.println("Expected: " + frameLength);
            System.err.println("Actual: " + aff.getFrameLength());
            throw new RuntimeException();
        }
    } else {
        // too large for an int: must be reported as NOT_SPECIFIED
        if (aff.getFrameLength() != AudioSystem.NOT_SPECIFIED) {
            System.err.println("Expected: " + AudioSystem.NOT_SPECIFIED);
            System.err.println("Actual: " + aff.getFrameLength());
            throw new RuntimeException();
        }
    }
    validateFormat(type[1], rate, channel, aff.getFormat());
}
项目:openjdk-jdk10    文件:UlawCodec.java   
/**
 * Returns the target formats with the given encoding that this codec
 * supports for the source format; only ULAW &lt;-&gt; PCM_SIGNED
 * conversions are handled.
 */
@Override
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat){
    Objects.requireNonNull(targetEncoding);
    Objects.requireNonNull(sourceFormat);
    AudioFormat.Encoding sourceEncoding = sourceFormat.getEncoding();
    boolean decode = AudioFormat.Encoding.PCM_SIGNED.equals(targetEncoding)
            && AudioFormat.Encoding.ULAW.equals(sourceEncoding);
    boolean encode = AudioFormat.Encoding.ULAW.equals(targetEncoding)
            && AudioFormat.Encoding.PCM_SIGNED.equals(sourceEncoding);
    return (decode || encode) ? getOutputFormats(sourceFormat)
                              : new AudioFormat[0];
}
项目:romanov    文件:FloatSampleBuffer.java   
/**
 * Writes this sample buffer's audio data to <code>buffer</code> as an
 * interleaved byte array. <code>buffer</code> must be large enough to
 * hold all data.
 * 
 * @param readOffset the sample offset from where samples are read from this
 *            FloatSampleBuffer
 * @param lenInSamples how many samples are converted
 * @param buffer the byte buffer written to
 * @param writeOffset the byte offset in buffer
 * @param format the target byte format; must match this buffer's sample
 *            rate and channel count
 * @throws IllegalArgumentException when buffer is too small or
 *             <code>format</code> doesn't match
 * @return number of bytes written to <code>buffer</code>
 */
public int convertToByteArray(int readOffset, int lenInSamples,
        byte[] buffer, int writeOffset, AudioFormat format) {
    int byteCount = format.getFrameSize() * lenInSamples;
    if (writeOffset + byteCount > buffer.length) {
        throw new IllegalArgumentException(
                "FloatSampleBuffer.convertToByteArray: buffer too small.");
    }
    // Deliberate identity (not equals) comparison: the validation and the
    // derived format code are cached per format *object* passed in.
    if (format != lastConvertToByteArrayFormat) {
        if (format.getSampleRate() != getSampleRate()) {
            throw new IllegalArgumentException(
                    "FloatSampleBuffer.convertToByteArray: different samplerates.");
        }
        if (format.getChannels() != getChannelCount()) {
            throw new IllegalArgumentException(
                    "FloatSampleBuffer.convertToByteArray: different channel count.");
        }
        lastConvertToByteArrayFormat = format;
        lastConvertToByteArrayFormatCode = FloatSampleTools.getFormatType(format);
    }
    FloatSampleTools.float2byte(channels, readOffset, buffer, writeOffset,
            lenInSamples, lastConvertToByteArrayFormatCode,
            format.getChannels(), format.getFrameSize(),
            getConvertDitherBits(lastConvertToByteArrayFormatCode));

    return byteCount;
}
项目:openjdk-jdk10    文件:WriteAuUnspecifiedLength.java   
/**
 * Regression test: writing an AU file from a stream whose frame length is
 * NOT_SPECIFIED must succeed.
 */
public static void main(String argv[]) throws Exception {
    // 16-bit stereo big-endian PCM at 44.1 kHz
    AudioFormat format = new AudioFormat(44100, 16, 2, true, true);
    InputStream source = new ByteArrayInputStream(new byte[1000]);
    // deliberately advertise an unspecified frame length
    AudioInputStream ais =
            new AudioInputStream(source, format, AudioSystem.NOT_SPECIFIED);
    AudioSystem.write(ais, AudioFileFormat.Type.AU, new ByteArrayOutputStream());
    System.out.println("Test passed.");
}
项目:openjdk-jdk10    文件:PCMtoPCMCodec.java   
@Override
public AudioFormat.Encoding[] getTargetEncodings(AudioFormat sourceFormat) {
    AudioFormat.Encoding encoding = sourceFormat.getEncoding();
    boolean signed = encoding.equals(AudioFormat.Encoding.PCM_SIGNED);
    boolean unsigned = encoding.equals(AudioFormat.Encoding.PCM_UNSIGNED);
    switch (sourceFormat.getSampleSizeInBits()) {
        case 8:
            // 8-bit data can only be converted to the opposite signedness
            if (signed) {
                return new AudioFormat.Encoding[]{
                        AudioFormat.Encoding.PCM_UNSIGNED
                };
            }
            if (unsigned) {
                return new AudioFormat.Encoding[]{
                        AudioFormat.Encoding.PCM_SIGNED
                };
            }
            break;
        case 16:
            // 16-bit PCM offers both target encodings
            if (signed || unsigned) {
                return new AudioFormat.Encoding[]{
                        AudioFormat.Encoding.PCM_UNSIGNED,
                        AudioFormat.Encoding.PCM_SIGNED
                };
            }
            break;
    }
    return new AudioFormat.Encoding[0];
}
项目:romanov    文件:FloatSampleTools.java   
/**
 * Converts interleaved byte data to per-channel float lists, delegating
 * with <code>allowAddChannel = true</code> so missing channels may be added
 * to <code>output</code>.
 *
 * @see #byte2float(byte[] input, int inByteOffset, Object[] output, int
 *      outOffset, int frameCount, AudioFormat format, boolean
 *      allowAddChannel)
 */
public static void byte2float(byte[] input, int inByteOffset,
        List<float[]> output, int outOffset, int frameCount,
        AudioFormat format) {

    byte2float(input, inByteOffset, output, outOffset, frameCount, format,
            true);
}
项目:rcom    文件:Replay2.java   
// Interactive replay tool: reads a base file name from stdin, loads the raw
// sample file /tmp/NAME.sw and plays it on the default mixer.
public static void main(String[] args) throws IOException, LineUnavailableException {
    try(Scanner br=new Scanner(System.in))
    {
        String s=br.nextLine();
        byte[] data=UtilFile.loadFile(new File("/tmp/"+s+".sw"));
        System.out.println("Playing: "+s);
        AudioFormat format=ManualTestEchoCancel.getFormat();
        final Mixer mixer = AudioSystem.getMixer(null);
        // anonymous subclass with no buffer switching: play one sample only
        Play p=new Play(mixer, format, ManualTestEchoCancel.frameSamples)
        {
        };
        p.start();
        p.setSample(data);
    }
}
项目:openjdk-jdk10    文件:AlawCodec.java   
@Override
public AudioInputStream getAudioInputStream(AudioFormat targetFormat, AudioInputStream sourceStream){
    AudioFormat sourceFormat = sourceStream.getFormat();
    // reject conversions this codec cannot perform
    if (!isConversionSupported(targetFormat, sourceFormat)) {
        throw new IllegalArgumentException("Unsupported conversion: "
                                           + sourceFormat.toString() + " to "
                                           + targetFormat.toString());
    }
    return getConvertedStream( targetFormat, sourceStream );
}
项目:openjdk-jdk10    文件:Toolkit.java   
/**
 * Returns true when the format is linear PCM (signed or unsigned) and all
 * of its rate, size, frame and channel fields are positive, i.e. specified.
 */
static boolean isFullySpecifiedPCMFormat(AudioFormat format) {
    AudioFormat.Encoding enc = format.getEncoding();
    boolean pcm = enc.equals(AudioFormat.Encoding.PCM_SIGNED)
            || enc.equals(AudioFormat.Encoding.PCM_UNSIGNED);
    return pcm
            && format.getFrameRate() > 0
            && format.getSampleRate() > 0
            && format.getSampleSizeInBits() > 0
            && format.getFrameSize() > 0
            && format.getChannels() > 0;
}
项目:romanov    文件:JSMinim.java   
/**
 * Builds an AudioSample from two float arrays of per-channel sample data.
 * NOTE(review): always allocates a stereo (2-channel) buffer sized by
 * left.length and copies right.length values into channel 1 — assumes left
 * and right are non-null and the same length; confirm callers guarantee this.
 */
public AudioSample getAudioSample(float[] left, float[] right, AudioFormat format, int bufferSize)
{
  FloatSampleBuffer sample = new FloatSampleBuffer(2, left.length, format.getSampleRate());
  System.arraycopy(left, 0, sample.getChannel(0), 0, left.length);
  System.arraycopy(right, 0, sample.getChannel(1), 0, right.length);
  return getAudioSampleImp(sample, format, bufferSize);
}
项目:jdk8u-jdk    文件:UlawCodec.java   
/**
 * Returns the target formats with the given encoding that this codec
 * supports for the source format; only ULAW to/from PCM_SIGNED is handled.
 */
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat){
    AudioFormat.Encoding sourceEncoding = sourceFormat.getEncoding();
    boolean decode = AudioFormat.Encoding.PCM_SIGNED.equals(targetEncoding)
            && AudioFormat.Encoding.ULAW.equals(sourceEncoding);
    boolean encode = AudioFormat.Encoding.ULAW.equals(targetEncoding)
            && AudioFormat.Encoding.PCM_SIGNED.equals(sourceEncoding);
    if (decode || encode) {
        return getOutputFormats(sourceFormat);
    }
    return new AudioFormat[0];
}
项目:openjdk-jdk10    文件:Toolkit.java   
/**
 * Validates that a linear PCM or a-law/u-law format has all of its numeric
 * fields specified (positive), throwing IllegalArgumentException with a
 * descriptive message otherwise. Other encodings are not checked.
 */
static void isFullySpecifiedAudioFormat(AudioFormat format) {
    AudioFormat.Encoding encoding = format.getEncoding();
    boolean verifiable = encoding.equals(AudioFormat.Encoding.PCM_SIGNED)
            || encoding.equals(AudioFormat.Encoding.PCM_UNSIGNED)
            || encoding.equals(AudioFormat.Encoding.ULAW)
            || encoding.equals(AudioFormat.Encoding.ALAW);
    if (!verifiable) {
        // we don't know how to verify possibly non-linear encodings
        return;
    }
    float frameRate = format.getFrameRate();
    if (frameRate <= 0) {
        throw new IllegalArgumentException("invalid frame rate: "
                                           + ((frameRate == -1) ?
                                              "NOT_SPECIFIED" : String.valueOf(frameRate)));
    }
    float sampleRate = format.getSampleRate();
    if (sampleRate <= 0) {
        throw new IllegalArgumentException("invalid sample rate: "
                                           + ((sampleRate == -1) ?
                                              "NOT_SPECIFIED" : String.valueOf(sampleRate)));
    }
    int sampleSize = format.getSampleSizeInBits();
    if (sampleSize <= 0) {
        throw new IllegalArgumentException("invalid sample size in bits: "
                                           + ((sampleSize == -1) ?
                                              "NOT_SPECIFIED" : String.valueOf(sampleSize)));
    }
    int frameSize = format.getFrameSize();
    if (frameSize <= 0) {
        throw new IllegalArgumentException("invalid frame size: "
                                           + ((frameSize == -1) ?
                                              "NOT_SPECIFIED" : String.valueOf(frameSize)));
    }
    int channels = format.getChannels();
    if (channels <= 0) {
        throw new IllegalArgumentException("invalid number of channels: "
                                           + ((channels == -1) ?
                                              "NOT_SPECIFIED" : String.valueOf(channels)));
    }
}
项目:romanov    文件:Minim.java   
/**
 * Loads the requested file into an {@link AudioPlayer} with the request
 * buffer size.
 * 
 * @param filename
 *            the file or URL you want to load
 * @param bufferSize
 *            int: the sample buffer size you want, which determines the 
 *            size of the left, right, and mix AudioBuffer fields of the 
 *            returned AudioPlayer.
 * 
 * @return an <code>AudioPlayer</code> with a sample buffer of the requested
 *         size, or null if we were unable to load the file
 */
public AudioPlayer loadFile(String filename, int bufferSize)
{
    AudioRecordingStream rec = mimp.getAudioRecordingStream( filename, bufferSize, false );
    AudioPlayer player = null;
    if ( rec != null )
    {
        AudioFormat format = rec.getFormat();
        AudioOut out = mimp.getAudioOutput( format.getChannels(),
                                            bufferSize,
                                            format.getSampleRate(),
                                            format.getSampleSizeInBits() );
        if ( out == null )
        {
            // no matching output line: release the stream we just opened
            rec.close();
        }
        else
        {
            player = new AudioPlayer( rec, out );
        }
    }

    if ( player == null )
    {
        error( "Couldn't load the file " + filename );
        return null;
    }
    addSource( player );
    return player;
}
项目:jdk8u-jdk    文件:PCMtoPCMCodec.java   
/**
 * Returns the encodings this codec can convert the given source format to:
 * both signed and unsigned PCM when the source is linear PCM, else none.
 */
public AudioFormat.Encoding[] getTargetEncodings(AudioFormat sourceFormat){
    AudioFormat.Encoding enc = sourceFormat.getEncoding();
    if (AudioFormat.Encoding.PCM_SIGNED.equals(enc)
            || AudioFormat.Encoding.PCM_UNSIGNED.equals(enc)) {
        return new AudioFormat.Encoding[]{
                AudioFormat.Encoding.PCM_SIGNED,
                AudioFormat.Encoding.PCM_UNSIGNED
        };
    }
    return new AudioFormat.Encoding[0];
}
项目:rcom    文件:ManualTestEchoCancel2.java   
/**
 * Audio format used by this test: 8 kHz, 16-bit, mono, signed PCM,
 * in the platform's native byte order.
 */
public static AudioFormat getFormat() {
    // match the hardware's endianness so no byte swapping is needed
    boolean nativeIsBigEndian = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN;
    return new AudioFormat(8000f /* sampleRate */,
                           16    /* sampleSizeInBits */,
                           1     /* channels */,
                           true  /* signed */,
                           nativeIsBigEndian);
}
项目:openjdk-jdk10    文件:DirectAudioDevice.java   
@Override
public void open(AudioFormat format, byte[] data, int offset, int bufferSize)
    throws LineUnavailableException {

    // $$fb part of fix for 4679187: Clip.open() throws unexpected Exceptions
    // validate up front so we fail before any allocation happens
    Toolkit.isFullySpecifiedAudioFormat(format);
    Toolkit.validateBuffer(format.getFrameSize(), bufferSize);

    // copy the caller's data so later mutation of their array
    // cannot affect the loaded clip
    final byte[] clipData = new byte[bufferSize];
    System.arraycopy(data, offset, clipData, 0, bufferSize);

    // delegate to the frame-count based overload
    open(format, clipData, bufferSize / format.getFrameSize());
}
项目:openjdk-jdk10    文件:RecognizeHugeWaveExtFiles.java   
/**
 * Tests the {@code AudioFileFormat} fetched from the fake header.
 * <p>
 * Note that the frameLength and byteLength are stored as int which means
 * that {@code AudioFileFormat} will store the data above {@code MAX_INT} as
 * NOT_SPECIFIED.
 */
private static void testAFF(final int[] type, final int rate,
                            final int channel, final long size)
        throws Exception {
    final ByteArrayInputStream fake =
            new ByteArrayInputStream(createHeader(type, rate, channel, size));
    final AudioFileFormat aff = AudioSystem.getAudioFileFormat(fake);
    final AudioFormat format = aff.getFormat();

    if (aff.getType() != AudioFileFormat.Type.WAVE) {
        throw new RuntimeException("Error");
    }

    // Frame counts that fit in an int must be reported exactly;
    // anything larger must collapse to NOT_SPECIFIED.
    final long frameLength = size / format.getFrameSize();
    final long expected = (frameLength <= Integer.MAX_VALUE)
            ? frameLength
            : AudioSystem.NOT_SPECIFIED;
    if (aff.getFrameLength() != expected) {
        System.err.println("Expected: " + expected);
        System.err.println("Actual: " + aff.getFrameLength());
        throw new RuntimeException();
    }
    validateFormat(type[1], rate, channel, aff.getFormat());
}
项目:tcc-rpg    文件:MicThread.java   
public MicThread(ObjectOutputStream toServer) throws LineUnavailableException {
    this.toServer = toServer;
    // open the microphone line using the application's shared format;
    // AudioSystem.getLine throws LineUnavailableException on failure
    AudioFormat format = SoundPacket.defaultFormat;
    DataLine.Info lineInfo = new DataLine.Info(TargetDataLine.class, null);
    mic = (TargetDataLine) AudioSystem.getLine(lineInfo);
    mic.open(format);
    mic.start();
}
项目:rcom    文件:ManualTestEchoCancel.java   
/**
 * @return the shared test format: 8 kHz mono, 16-bit signed PCM,
 *         using the platform's native byte order.
 */
public static AudioFormat getFormat() {
    final float sampleRate = 8000;
    final int sampleSizeInBits = 16;
    final int channels = 1;
    final boolean signed = true;
    // choose endianness matching the platform default byte order
    final boolean bigEndian = ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN);
    return new AudioFormat(sampleRate, sampleSizeInBits, channels, signed, bigEndian);
}
项目:openjdk-jdk10    文件:AlawCodec.java   
@Override
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat){
    Objects.requireNonNull(sourceFormat);
    final AudioFormat.Encoding sourceEncoding = sourceFormat.getEncoding();

    // this codec only converts ALAW <-> PCM_SIGNED, in either direction
    final boolean alawToPcm =
            targetEncoding.equals( AudioFormat.Encoding.PCM_SIGNED )
            && sourceEncoding.equals( AudioFormat.Encoding.ALAW );
    final boolean pcmToAlaw =
            targetEncoding.equals( AudioFormat.Encoding.ALAW )
            && sourceEncoding.equals( AudioFormat.Encoding.PCM_SIGNED );

    if( alawToPcm || pcmToAlaw ) {
        return getOutputFormats( sourceFormat );
    }
    return new AudioFormat[0];
}
项目:openjdk-jdk10    文件:AudioFloatFormatConverter.java   
@Override
public AudioInputStream getAudioInputStream(AudioFormat targetFormat,
                                            AudioInputStream sourceStream) {
    final AudioFormat sourceFormat = sourceStream.getFormat();
    if (!isConversionSupported(targetFormat, sourceFormat)) {
        throw new IllegalArgumentException("Unsupported conversion: "
                + sourceFormat.toString() + " to "
                + targetFormat.toString());
    }
    // wrap the source as a float stream, then convert to the target format
    final AudioFloatInputStream floatStream =
            AudioFloatInputStream.getInputStream(sourceStream);
    return getAudioInputStream(targetFormat, floatStream);
}