1,音视频通话一对一;音视频会议多对多。
2,视频会议最难的是网络,没有多对多的模型。1个server端,n个client端,可以实现多对多。
1,h264分成两种编码方式,一种是类似67(分三部分,是否可用,重要性,帧类型),另一种是哥伦布编码。
2,分隔符是固定字节,67也是固定字节。解析起来很方便。
3,如果解析宽高也用固定字节就不合适,有的宽高大,有的宽高小。这样会造成浪费。这种情况下就要使用哥伦布编码了。
4,一个宏块,与下一个宏块大小不一样,怎么保证读取第一个宏块的时候不越界?
(1),可以使用xml标签来分隔,但是标签本身也要占用数据空间,不合适。
(2),长度+内容。就像MMKV一样。虽然可以,但是长度也需要存储。不合适
5,哥伦布编码:
哥伦布编码适合于数值小但出现频繁的数据(例如像素值最大也就是255)。一个汉字两个字节,属于定长编码。
哥伦布属于变长编码。
4对应的二进制是100。编码时先加1:4+1=5,即101;1后面有两个二进制位,就在前面补两个0,变成00101。解码的时候,第一个1前面有几个0,就在这个1之后再读几个二进制位,把读到的整个数减1,就还原出原值4。
1,音频是一种波,可以合成,可以分解
2,两个波的混音,是把对应位置的采样值(16位,两个字节)相加。如果结果超过了32767(short的最大值),就赋值为32767;低于-32768就赋值为-32768。
3,波形幅度越高,声音越大。把每个采样值乘以一个大于1的系数,音量就会增大。
4,如何识别噪音?算法很复杂,
5,PCM就是一种原始的波,保存一个波点要用2个字节,比如 ac c4
效果图
需要4个临时文件
String mime = format.getString(MediaFormat.KEY_MIME);断点调试获取到的信息
WAV,对pcm进行封装,封装后可以用播放器播放。
package com.maniu.douyinclip;
import android.annotation.SuppressLint;
import android.media.AudioFormat;
import android.media.MediaCodec;
import android.media.MediaExtractor;
import android.media.MediaFormat;
import android.os.Environment;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
/**
 * Clips a video's audio track, mixes it with a second audio file, and writes the
 * mixed result out as PCM and then as a playable WAV file.
 *
 * <p>MP3/AAC are compressed formats; mixing must happen on raw PCM. Pipeline:
 * decode both inputs to PCM, mix the PCM streams sample by sample, then wrap the
 * mixed PCM in a WAV header so ordinary players can open it.
 *
 * <p>All methods do blocking file and codec I/O — call them from a worker thread.
 */
public class MusicProcess {

    /** Timeout in microseconds for MediaCodec dequeue calls. */
    private static final long TIMEOUT_US = 1_000;

    /**
     * Clips [startTimeUs, endTimeUs] out of both inputs, mixes them and writes a
     * playable WAV file next to the intermediate PCM files on external storage.
     *
     * @param videoInput  path of the source video (its audio track is used)
     * @param audioInput  path of the background music file
     * @param output      output path (reserved for the final muxed video)
     * @param startTimeUs clip start, in microseconds
     * @param endTimeUs   clip end, in microseconds
     * @param videoVolume volume of the video's own audio, 0-100
     * @param aacVolume   volume of the background music, 0-100
     * @throws Exception on extractor/codec/file failures
     */
    public static void mixAudioTrack(final String videoInput,
                                     final String audioInput,
                                     final String output,
                                     final Integer startTimeUs,
                                     final Integer endTimeUs,
                                     int videoVolume,
                                     int aacVolume) throws Exception {
        File cacheDir = Environment.getExternalStorageDirectory();
        // Decode the video's audio track to raw PCM.
        final File videoPcmFile = new File(cacheDir, "video" + ".pcm");
        decodeToPCM(videoInput, videoPcmFile.getAbsolutePath(), startTimeUs, endTimeUs);
        // Decode the downloaded music to raw PCM.
        File aacPcmFile = new File(cacheDir, "audio" + ".pcm");
        decodeToPCM(audioInput, aacPcmFile.getAbsolutePath(), startTimeUs, endTimeUs);
        // Mix audio.pcm and video.pcm sample by sample.
        File adjustedPcm = new File(cacheDir, "混合后的" + ".pcm");
        mixPcm(videoPcmFile.getAbsolutePath(), aacPcmFile.getAbsolutePath(),
                adjustedPcm.getAbsolutePath(), videoVolume, aacVolume);
        // Wrap the mixed PCM in a WAV header so it can be played directly.
        File wavFile = new File(cacheDir, adjustedPcm.getName() + ".wav");
        new PcmToWavUtil(44100, AudioFormat.CHANNEL_IN_STEREO,
                2, AudioFormat.ENCODING_PCM_16BIT)
                .pcmToWav(adjustedPcm.getAbsolutePath(), wavFile.getAbsolutePath());
    }

    /**
     * Mixes two 16-bit little-endian PCM files sample by sample into a new file.
     *
     * @param pcm1Path first input PCM file
     * @param pcm2Path second input PCM file
     * @param toPath   output PCM file
     * @param vol1     volume of the first stream, 0-100
     * @param vol2     volume of the second stream, 0-100
     * @throws IOException on file errors
     */
    public static void mixPcm(String pcm1Path, String pcm2Path, String toPath,
                              int vol1, int vol2) throws IOException {
        // Volumes arrive as 0-100 percentages; convert to linear gain factors.
        float volume1 = vol1 / 100f;
        float volume2 = vol2 / 100f;
        try (FileInputStream is1 = new FileInputStream(pcm1Path);
             FileInputStream is2 = new FileInputStream(pcm2Path);
             FileOutputStream out = new FileOutputStream(toPath)) {
            byte[] buffer1 = new byte[2048];
            byte[] buffer2 = new byte[2048];
            byte[] mixed = new byte[2048];
            boolean end1 = false;
            boolean end2 = false;
            while (!end1 || !end2) {
                int len1 = 0;
                int len2 = 0;
                if (!end1) {
                    len1 = is1.read(buffer1);
                    if (len1 < 0) {
                        end1 = true;
                        len1 = 0;
                    }
                }
                if (!end2) {
                    len2 = is2.read(buffer2);
                    if (len2 < 0) {
                        end2 = true;
                        len2 = 0;
                    }
                }
                // BUG FIX: the original kept mixing (and writing) stale buffer
                // contents after EOF or a short read. Zero the unread tail so an
                // exhausted stream contributes silence instead.
                java.util.Arrays.fill(buffer1, len1, buffer1.length, (byte) 0);
                java.util.Arrays.fill(buffer2, len2, buffer2.length, (byte) 0);
                int len = Math.max(len1, len2);
                if (len == 0) {
                    break; // both streams exhausted this round — nothing to write
                }
                len += len & 1; // round up to a whole 16-bit sample
                for (int i = 0; i < len; i += 2) {
                    // Little-endian 16-bit samples: low byte first, then high byte.
                    short sample1 = (short) ((buffer1[i] & 0xff) | (buffer1[i + 1] & 0xff) << 8);
                    short sample2 = (short) ((buffer2[i] & 0xff) | (buffer2[i + 1] & 0xff) << 8);
                    int voice = (int) (sample1 * volume1 + sample2 * volume2);
                    // Clamp to the signed 16-bit range to avoid wrap-around noise.
                    if (voice > 32767) {
                        voice = 32767;
                    } else if (voice < -32768) {
                        voice = -32768;
                    }
                    mixed[i] = (byte) (voice & 0xFF);
                    mixed[i + 1] = (byte) ((voice >>> 8) & 0xFF);
                }
                // BUG FIX: write only the bytes actually mixed this round
                // (the original always wrote the full 2048-byte buffer).
                out.write(mixed, 0, len);
            }
        }
    }

    /**
     * Decodes the audio track of a media file to raw PCM, clipped to
     * [startTime, endTime] (both in microseconds).
     *
     * @param musicPath source media file (video or audio container)
     * @param outPath   destination PCM file
     * @param startTime clip start in microseconds
     * @param endTime   clip end in microseconds
     * @throws Exception on extractor/codec/file failures
     */
    @SuppressLint("WrongConstant")
    public static void decodeToPCM(String musicPath,
                                   String outPath, int startTime, int endTime) throws Exception {
        if (endTime < startTime) {
            return;
        }
        // MediaExtractor pulls elementary streams (tracks) out of a container.
        MediaExtractor mediaExtractor = new MediaExtractor();
        mediaExtractor.setDataSource(musicPath);
        int audioTrack = selectTrack(mediaExtractor);
        // BUG FIX: the original passed -1 straight into selectTrack/getTrackFormat
        // when the file had no audio track.
        if (audioTrack < 0) {
            mediaExtractor.release();
            throw new IllegalArgumentException("No audio track in " + musicPath);
        }
        mediaExtractor.selectTrack(audioTrack);
        // SEEK_TO_NEXT_SYNC: land on the next sync sample at/after startTime,
        // discarding anything that would need data before it.
        mediaExtractor.seekTo(startTime, MediaExtractor.SEEK_TO_NEXT_SYNC);
        MediaFormat audioFormat = mediaExtractor.getTrackFormat(audioTrack);
        // Create the decoder from the track's MIME type — never hard-code it.
        MediaCodec mediaCodec =
                MediaCodec.createDecoderByType(audioFormat.getString(MediaFormat.KEY_MIME));
        mediaCodec.configure(audioFormat, null, null, 0);
        mediaCodec.start();
        // Size the staging buffer from track metadata when available; too small
        // would overflow, too big just wastes memory.
        int maxBufferSize = audioFormat.containsKey(MediaFormat.KEY_MAX_INPUT_SIZE)
                ? audioFormat.getInteger(MediaFormat.KEY_MAX_INPUT_SIZE)
                : 100 * 1000;
        FileChannel writeChannel = new FileOutputStream(new File(outPath)).getChannel();
        try {
            ByteBuffer buffer = ByteBuffer.allocateDirect(maxBufferSize);
            MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
            // BUG FIX: the original advanced past samples before startTime but then
            // fell through and queued the already-consumed sample with a stale
            // timestamp. Skip them all up front instead.
            while (mediaExtractor.getSampleTime() != -1
                    && mediaExtractor.getSampleTime() < startTime) {
                mediaExtractor.advance();
            }
            boolean inputDone = false;
            boolean outputDone = false;
            while (!outputDone) {
                if (!inputDone) {
                    int inIndex = mediaCodec.dequeueInputBuffer(TIMEOUT_US);
                    if (inIndex >= 0) {
                        long sampleTimeUs = mediaExtractor.getSampleTime();
                        if (sampleTimeUs == -1 || sampleTimeUs > endTime) {
                            // BUG FIX: signal end-of-stream and keep draining; the
                            // original broke out immediately and dropped the frames
                            // still buffered inside the decoder.
                            mediaCodec.queueInputBuffer(inIndex, 0, 0, 0,
                                    MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                            inputDone = true;
                        } else {
                            int size = mediaExtractor.readSampleData(buffer, 0);
                            byte[] content = new byte[buffer.remaining()];
                            buffer.get(content);
                            FileUtils.writeContent(content); // debug dump of the compressed frame
                            ByteBuffer inputBuffer = mediaCodec.getInputBuffer(inIndex);
                            inputBuffer.put(content);
                            mediaCodec.queueInputBuffer(inIndex, 0, size, sampleTimeUs,
                                    mediaExtractor.getSampleFlags());
                            mediaExtractor.advance(); // release this sample, move to the next
                        }
                    }
                }
                // Drain every decoded (raw PCM) buffer that is ready.
                int outIndex = mediaCodec.dequeueOutputBuffer(info, TIMEOUT_US);
                while (outIndex >= 0) {
                    if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                        outputDone = true;
                    }
                    ByteBuffer decodeOutputBuffer = mediaCodec.getOutputBuffer(outIndex);
                    writeChannel.write(decodeOutputBuffer); // raw PCM out
                    mediaCodec.releaseOutputBuffer(outIndex, false);
                    if (outputDone) {
                        break;
                    }
                    outIndex = mediaCodec.dequeueOutputBuffer(info, TIMEOUT_US);
                }
            }
        } finally {
            // BUG FIX: the original leaked all of these when decoding threw.
            writeChannel.close();
            mediaExtractor.release();
            mediaCodec.stop();
            mediaCodec.release();
        }
    }

    /**
     * Returns the index of the first audio track, or -1 when none exists.
     * Track order in a container is arbitrary, so every track's MIME type is
     * checked ("audio/..." marks an audio track).
     */
    public static int selectTrack(MediaExtractor extractor) {
        int numTracks = extractor.getTrackCount();
        for (int i = 0; i < numTracks; i++) {
            MediaFormat format = extractor.getTrackFormat(i);
            String mime = format.getString(MediaFormat.KEY_MIME);
            if (mime.startsWith("audio")) {
                return i;
            }
        }
        return -1;
    }
}
package com.maniu.douyinclip;
import android.media.AudioFormat;
import android.media.AudioRecord;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
/**
 * PCM转WAV:先写WAV文件头,再写PCM数据,转完后可以用播放器播放。
 */
public class PcmToWavUtil {
    private int mBufferSize;                                    // copy-buffer size for the PCM payload
    private int mSampleRate = 8000;                             // e.g. 8000 or 16000 Hz
    private int mChannelConfig = AudioFormat.CHANNEL_IN_STEREO; // stereo channel mask
    private int mChannelCount = 2;                              // channel count written into the header
    private int mEncoding = AudioFormat.ENCODING_PCM_16BIT;     // 16-bit samples

    public PcmToWavUtil() {
        this.mBufferSize = AudioRecord.getMinBufferSize(mSampleRate, mChannelConfig, mEncoding);
    }

    /**
     * @param sampleRate    sample rate in Hz
     * @param channelConfig AudioFormat channel mask (e.g. CHANNEL_IN_STEREO)
     * @param channelCount  number of channels (1 = mono, 2 = stereo)
     * @param encoding      audio data format, e.g. ENCODING_PCM_16BIT
     */
    public PcmToWavUtil(int sampleRate, int channelConfig, int channelCount, int encoding) {
        this.mSampleRate = sampleRate;
        this.mChannelConfig = channelConfig;
        this.mChannelCount = channelCount;
        this.mEncoding = encoding;
        this.mBufferSize = AudioRecord.getMinBufferSize(mSampleRate, mChannelConfig, mEncoding);
    }

    /**
     * Converts a raw PCM file to a WAV file: the 44-byte header first, then the
     * samples copied through unchanged.
     *
     * @param inFilename  source PCM path
     * @param outFilename destination WAV path
     */
    public void pcmToWav(String inFilename, String outFilename) {
        long longSampleRate = mSampleRate;
        int channels = mChannelCount;
        // NOTE(review): assumes 16 bits per sample regardless of mEncoding,
        // matching the header written below — confirm if other encodings are used.
        long byteRate = 16 * mSampleRate * channels / 8;
        // BUG FIX: getMinBufferSize may return an error code (<= 0), which made
        // the original throw NegativeArraySizeException; fall back to a sane size.
        byte[] data = new byte[Math.max(mBufferSize, 4096)];
        // BUG FIX: try-with-resources — the original leaked both streams on error.
        try (FileInputStream in = new FileInputStream(inFilename);
             FileOutputStream out = new FileOutputStream(outFilename)) {
            long totalAudioLen = in.getChannel().size();
            long totalDataLen = totalAudioLen + 36; // RIFF chunk size = file size - 8
            writeWaveFileHeader(out, totalAudioLen, totalDataLen,
                    longSampleRate, channels, byteRate);
            int read;
            // BUG FIX: the original wrote the whole buffer even on a short read,
            // appending stale bytes at the end of the file.
            while ((read = in.read(data)) != -1) {
                out.write(data, 0, read);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Writes the canonical 44-byte PCM WAV header. All multi-byte fields are
     * little-endian.
     */
    private void writeWaveFileHeader(FileOutputStream out, long totalAudioLen,
            long totalDataLen, long longSampleRate, int channels, long byteRate)
            throws IOException {
        byte[] header = new byte[44];
        header[0] = 'R'; // "RIFF" chunk id
        header[1] = 'I';
        header[2] = 'F';
        header[3] = 'F';
        header[4] = (byte) (totalDataLen & 0xff); // chunk size, little-endian
        header[5] = (byte) ((totalDataLen >> 8) & 0xff);
        header[6] = (byte) ((totalDataLen >> 16) & 0xff);
        header[7] = (byte) ((totalDataLen >> 24) & 0xff);
        header[8] = 'W'; // "WAVE" format
        header[9] = 'A';
        header[10] = 'V';
        header[11] = 'E';
        header[12] = 'f'; // "fmt " sub-chunk
        header[13] = 'm';
        header[14] = 't';
        header[15] = ' ';
        header[16] = 16; // size of the fmt sub-chunk (16 for PCM)
        header[17] = 0;
        header[18] = 0;
        header[19] = 0;
        header[20] = 1; // audio format: 1 = uncompressed PCM
        header[21] = 0;
        header[22] = (byte) channels; // channel count
        header[23] = 0;
        header[24] = (byte) (longSampleRate & 0xff); // sample rate
        header[25] = (byte) ((longSampleRate >> 8) & 0xff);
        header[26] = (byte) ((longSampleRate >> 16) & 0xff);
        header[27] = (byte) ((longSampleRate >> 24) & 0xff);
        header[28] = (byte) (byteRate & 0xff); // bytes per second
        header[29] = (byte) ((byteRate >> 8) & 0xff);
        header[30] = (byte) ((byteRate >> 16) & 0xff);
        header[31] = (byte) ((byteRate >> 24) & 0xff);
        // BUG FIX: block align = channels * bytesPerSample; the original
        // hard-coded stereo (2 * 16 / 8), producing a wrong header for mono.
        header[32] = (byte) (channels * 16 / 8);
        header[33] = 0;
        header[34] = 16; // bits per sample
        header[35] = 0;
        header[36] = 'd'; // "data" sub-chunk
        header[37] = 'a';
        header[38] = 't';
        header[39] = 'a';
        header[40] = (byte) (totalAudioLen & 0xff); // payload size in bytes
        header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
        header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
        header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
        out.write(header, 0, 44);
    }
}
现在我们得到了WAV格式的音频,接下来把它和视频码流混合(复用),生成新的视频。
package com.maniu.douyinclip;
import android.annotation.SuppressLint;
import android.media.AudioFormat;
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaExtractor;
import android.media.MediaFormat;
import android.media.MediaMuxer;
import android.os.Environment;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
/**
 * Full clip pipeline: decode both audio sources to PCM, mix them, wrap the mix
 * as WAV, then mux the re-encoded AAC audio together with the original
 * (compressed, untouched) video stream into a new MP4.
 *
 * <p>All methods do blocking file and codec I/O — call them from a worker thread.
 */
public class MusicProcess {

    /** Timeout in microseconds for MediaCodec dequeue calls. */
    private static final long TIMEOUT = 1000;

    /**
     * Clips [startTimeUs, endTimeUs], mixes the video's audio with the music
     * file, and writes a new MP4 to {@code output}.
     *
     * @param videoInput  path of the source video
     * @param audioInput  path of the background music file
     * @param output      path of the muxed output MP4
     * @param startTimeUs clip start, microseconds
     * @param endTimeUs   clip end, microseconds
     * @param videoVolume volume of the video's own audio, 0-100
     * @param aacVolume   volume of the background music, 0-100
     * @throws Exception on extractor/codec/muxer/file failures
     */
    public static void mixAudioTrack(final String videoInput,
                                     final String audioInput,
                                     final String output,
                                     final Integer startTimeUs,
                                     final Integer endTimeUs,
                                     int videoVolume,
                                     int aacVolume) throws Exception {
        File cacheDir = Environment.getExternalStorageDirectory();
        // Decode the video's audio track to raw PCM.
        final File videoPcmFile = new File(cacheDir, "video" + ".pcm");
        decodeToPCM(videoInput, videoPcmFile.getAbsolutePath(), startTimeUs, endTimeUs);
        // Decode the music to raw PCM.
        File aacPcmFile = new File(cacheDir, "audio" + ".pcm");
        decodeToPCM(audioInput, aacPcmFile.getAbsolutePath(), startTimeUs, endTimeUs);
        // Mix the two PCM streams.
        File adjustedPcm = new File(cacheDir, "混合后的" + ".pcm");
        mixPcm(videoPcmFile.getAbsolutePath(), aacPcmFile.getAbsolutePath(),
                adjustedPcm.getAbsolutePath(), videoVolume, aacVolume);
        // Wrap the mix in a WAV header, then mux it with the video stream.
        File wavFile = new File(cacheDir, adjustedPcm.getName() + ".wav");
        new PcmToWavUtil(44100, AudioFormat.CHANNEL_IN_STEREO,
                2, AudioFormat.ENCODING_PCM_16BIT)
                .pcmToWav(adjustedPcm.getAbsolutePath(), wavFile.getAbsolutePath());
        mixVideoAndMusic(videoInput, output, startTimeUs, endTimeUs, wavFile);
    }

    /**
     * Muxes the original video stream (compressed, copied as-is) with the mixed
     * WAV audio (re-encoded to AAC) into a new MP4.
     *
     * @param videoInput  source video; its own audio track is dropped
     * @param output      destination MP4 path
     * @param startTimeUs clip start, microseconds
     * @param endTimeUs   clip end, microseconds (may be null = to the end)
     * @param wavFile     the mixed audio as WAV
     */
    @SuppressLint("WrongConstant")
    private static void mixVideoAndMusic(String videoInput, String output,
                                         Integer startTimeUs, Integer endTimeUs,
                                         File wavFile) throws IOException {
        // Output container; MediaMuxer supports only a few formats (MP4 among them).
        MediaMuxer mediaMuxer = new MediaMuxer(output, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
        MediaExtractor mediaExtractor = new MediaExtractor();
        mediaExtractor.setDataSource(videoInput);
        // Extractor-side track indices of the video and audio streams.
        int videoIndex = selectTrack(mediaExtractor, false);
        int audioIndex = selectTrack(mediaExtractor, true);
        MediaFormat videoFormat = mediaExtractor.getTrackFormat(videoIndex);
        // BUG FIX: keep the index addTrack returns — the muxer's track numbering
        // is independent of the extractor's (the original discarded it and later
        // wrote video samples with the extractor index).
        int muxerVideoIndex = mediaMuxer.addTrack(videoFormat);
        MediaFormat audioFormat = mediaExtractor.getTrackFormat(audioIndex);
        int audioBitrate = audioFormat.getInteger(MediaFormat.KEY_BIT_RATE);
        audioFormat.setString(MediaFormat.KEY_MIME, MediaFormat.MIMETYPE_AUDIO_AAC);
        int muxerAudioIndex = mediaMuxer.addTrack(audioFormat);
        // All tracks must be added before start().
        mediaMuxer.start();

        // ---- Audio first: read WAV, encode to AAC, feed the muxer. ----
        MediaExtractor pcmExtrator = new MediaExtractor();
        pcmExtrator.setDataSource(wavFile.getAbsolutePath());
        int audioTrack = selectTrack(pcmExtrator, true);
        pcmExtrator.selectTrack(audioTrack);
        MediaFormat pcmTrackFormat = pcmExtrator.getTrackFormat(audioTrack);
        // BUG FIX: the original tested audioFormat.containsKey but read the value
        // from pcmTrackFormat — check and read the same format.
        int maxBufferSize = pcmTrackFormat.containsKey(MediaFormat.KEY_MAX_INPUT_SIZE)
                ? pcmTrackFormat.getInteger(MediaFormat.KEY_MAX_INPUT_SIZE)
                : 100 * 1000;
        // Encoder config: mime, sample rate, channel count.
        MediaFormat encodeFormat = MediaFormat.createAudioFormat(
                MediaFormat.MIMETYPE_AUDIO_AAC, 44100, 2);
        encodeFormat.setInteger(MediaFormat.KEY_BIT_RATE, audioBitrate); // bit rate
        // AAC-LC: the common low-complexity profile.
        encodeFormat.setInteger(MediaFormat.KEY_AAC_PROFILE,
                MediaCodecInfo.CodecProfileLevel.AACObjectLC);
        encodeFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, maxBufferSize);
        MediaCodec encoder = MediaCodec.createEncoderByType(MediaFormat.MIMETYPE_AUDIO_AAC);
        encoder.configure(encodeFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        encoder.start();

        ByteBuffer buffer = ByteBuffer.allocateDirect(maxBufferSize);
        MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
        boolean encodeDone = false;
        while (!encodeDone) {
            int inputBufferIndex = encoder.dequeueInputBuffer(10000);
            if (inputBufferIndex >= 0) {
                // getSampleTime() < 0 means the WAV has been fully consumed.
                long sampleTime = pcmExtrator.getSampleTime();
                if (sampleTime < 0) {
                    // Signal end-of-stream so the encoder flushes its tail.
                    encoder.queueInputBuffer(inputBufferIndex, 0, 0, 0,
                            MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                } else {
                    int flags = pcmExtrator.getSampleFlags();
                    int size = pcmExtrator.readSampleData(buffer, 0);
                    ByteBuffer inputBuffer = encoder.getInputBuffer(inputBufferIndex);
                    inputBuffer.clear();
                    inputBuffer.put(buffer);
                    inputBuffer.position(0);
                    encoder.queueInputBuffer(inputBufferIndex, 0, size, sampleTime, flags);
                    // Must advance, or the same sample is read forever.
                    pcmExtrator.advance();
                }
            }
            // Drain every encoded AAC buffer that is ready.
            int outIndex = encoder.dequeueOutputBuffer(info, TIMEOUT);
            while (outIndex >= 0) {
                // BUG FIX: test the EOS bit with '&' — flags can carry other bits
                // (the original compared with '==', which can miss EOS).
                if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                    // BUG FIX: release the EOS buffer before leaving the loop.
                    encoder.releaseOutputBuffer(outIndex, false);
                    encodeDone = true;
                    break;
                }
                ByteBuffer encodeOutputBuffer = encoder.getOutputBuffer(outIndex);
                mediaMuxer.writeSampleData(muxerAudioIndex, encodeOutputBuffer, info);
                encodeOutputBuffer.clear();
                // Hand the buffer back so the codec can reuse it for the next frame.
                encoder.releaseOutputBuffer(outIndex, false);
                outIndex = encoder.dequeueOutputBuffer(info, TIMEOUT);
            }
        }

        // ---- Video: copy the compressed stream straight through. ----
        if (audioIndex >= 0) {
            // BUG FIX: the original unselected pcmExtrator's track index on
            // mediaExtractor; the video file's own audio track is audioIndex.
            mediaExtractor.unselectTrack(audioIndex);
        }
        mediaExtractor.selectTrack(videoIndex);
        // Seek to the I-frame at or before startTimeUs so the clip decodes cleanly.
        mediaExtractor.seekTo(startTimeUs, MediaExtractor.SEEK_TO_PREVIOUS_SYNC);
        maxBufferSize = videoFormat.getInteger(MediaFormat.KEY_MAX_INPUT_SIZE);
        buffer = ByteBuffer.allocateDirect(maxBufferSize);
        while (true) {
            long sampleTimeUs = mediaExtractor.getSampleTime();
            if (sampleTimeUs == -1) {
                break; // end of stream
            }
            if (sampleTimeUs < startTimeUs) {
                mediaExtractor.advance(); // skip frames before the clip window
                continue;
            }
            if (endTimeUs != null && sampleTimeUs > endTimeUs) {
                break;
            }
            // NOTE(review): the +600us offset appears to leave headroom at t=0 —
            // confirm whether it is still needed.
            info.presentationTimeUs = sampleTimeUs - startTimeUs + 600;
            info.flags = mediaExtractor.getSampleFlags();
            info.size = mediaExtractor.readSampleData(buffer, 0);
            if (info.size < 0) {
                break;
            }
            // BUG FIX: write with the muxer's track index, not the extractor's.
            mediaMuxer.writeSampleData(muxerVideoIndex, buffer, info);
            mediaExtractor.advance();
        }
        pcmExtrator.release();
        mediaExtractor.release();
        encoder.stop();
        encoder.release();
        // BUG FIX: stop the muxer before releasing so the MP4 index is finalized
        // (release() without stop() can leave an unplayable file).
        mediaMuxer.stop();
        mediaMuxer.release();
    }

    /**
     * Mixes two 16-bit little-endian PCM files sample by sample into a new file.
     *
     * @param pcm1Path first input PCM file
     * @param pcm2Path second input PCM file
     * @param toPath   output PCM file
     * @param vol1     volume of the first stream, 0-100
     * @param vol2     volume of the second stream, 0-100
     * @throws IOException on file errors
     */
    public static void mixPcm(String pcm1Path, String pcm2Path, String toPath,
                              int vol1, int vol2) throws IOException {
        // Volumes arrive as 0-100 percentages; convert to linear gain factors.
        float volume1 = vol1 / 100f;
        float volume2 = vol2 / 100f;
        try (FileInputStream is1 = new FileInputStream(pcm1Path);
             FileInputStream is2 = new FileInputStream(pcm2Path);
             FileOutputStream out = new FileOutputStream(toPath)) {
            byte[] buffer1 = new byte[2048];
            byte[] buffer2 = new byte[2048];
            byte[] mixed = new byte[2048];
            boolean end1 = false;
            boolean end2 = false;
            while (!end1 || !end2) {
                int len1 = 0;
                int len2 = 0;
                if (!end1) {
                    len1 = is1.read(buffer1);
                    if (len1 < 0) {
                        end1 = true;
                        len1 = 0;
                    }
                }
                if (!end2) {
                    len2 = is2.read(buffer2);
                    if (len2 < 0) {
                        end2 = true;
                        len2 = 0;
                    }
                }
                // BUG FIX: the original kept mixing (and writing) stale buffer
                // contents after EOF or a short read. Zero the unread tail so an
                // exhausted stream contributes silence instead.
                java.util.Arrays.fill(buffer1, len1, buffer1.length, (byte) 0);
                java.util.Arrays.fill(buffer2, len2, buffer2.length, (byte) 0);
                int len = Math.max(len1, len2);
                if (len == 0) {
                    break; // both streams exhausted this round
                }
                len += len & 1; // round up to a whole 16-bit sample
                for (int i = 0; i < len; i += 2) {
                    // Little-endian 16-bit samples: low byte first, then high byte.
                    short sample1 = (short) ((buffer1[i] & 0xff) | (buffer1[i + 1] & 0xff) << 8);
                    short sample2 = (short) ((buffer2[i] & 0xff) | (buffer2[i + 1] & 0xff) << 8);
                    int voice = (int) (sample1 * volume1 + sample2 * volume2);
                    // Clamp to the signed 16-bit range to avoid wrap-around noise.
                    if (voice > 32767) {
                        voice = 32767;
                    } else if (voice < -32768) {
                        voice = -32768;
                    }
                    mixed[i] = (byte) (voice & 0xFF);
                    mixed[i + 1] = (byte) ((voice >>> 8) & 0xFF);
                }
                // BUG FIX: write only the bytes actually mixed this round
                // (the original always wrote the full 2048-byte buffer).
                out.write(mixed, 0, len);
            }
        }
    }

    /**
     * Decodes the audio track of a media file to raw PCM, clipped to
     * [startTime, endTime] (both in microseconds).
     *
     * @param musicPath source media file (video or audio container)
     * @param outPath   destination PCM file
     * @param startTime clip start in microseconds
     * @param endTime   clip end in microseconds
     * @throws Exception on extractor/codec/file failures
     */
    @SuppressLint("WrongConstant")
    public static void decodeToPCM(String musicPath,
                                   String outPath, int startTime, int endTime) throws Exception {
        if (endTime < startTime) {
            return;
        }
        MediaExtractor mediaExtractor = new MediaExtractor();
        mediaExtractor.setDataSource(musicPath);
        int audioTrack = selectTrack(mediaExtractor, true);
        // BUG FIX: the original passed -1 straight into selectTrack/getTrackFormat
        // when the file had no audio track.
        if (audioTrack < 0) {
            mediaExtractor.release();
            throw new IllegalArgumentException("No audio track in " + musicPath);
        }
        mediaExtractor.selectTrack(audioTrack);
        mediaExtractor.seekTo(startTime, MediaExtractor.SEEK_TO_NEXT_SYNC);
        MediaFormat audioFormat = mediaExtractor.getTrackFormat(audioTrack);
        // Create the decoder from the track's MIME type — never hard-code it.
        MediaCodec mediaCodec =
                MediaCodec.createDecoderByType(audioFormat.getString(MediaFormat.KEY_MIME));
        mediaCodec.configure(audioFormat, null, null, 0);
        mediaCodec.start();
        int maxBufferSize = audioFormat.containsKey(MediaFormat.KEY_MAX_INPUT_SIZE)
                ? audioFormat.getInteger(MediaFormat.KEY_MAX_INPUT_SIZE)
                : 100 * 1000;
        FileChannel writeChannel = new FileOutputStream(new File(outPath)).getChannel();
        try {
            ByteBuffer buffer = ByteBuffer.allocateDirect(maxBufferSize);
            MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
            // BUG FIX: the original advanced past samples before startTime but then
            // fell through and queued the already-consumed sample with a stale
            // timestamp. Skip them all up front instead.
            while (mediaExtractor.getSampleTime() != -1
                    && mediaExtractor.getSampleTime() < startTime) {
                mediaExtractor.advance();
            }
            boolean inputDone = false;
            boolean outputDone = false;
            while (!outputDone) {
                if (!inputDone) {
                    int inIndex = mediaCodec.dequeueInputBuffer(TIMEOUT);
                    if (inIndex >= 0) {
                        long sampleTimeUs = mediaExtractor.getSampleTime();
                        if (sampleTimeUs == -1 || sampleTimeUs > endTime) {
                            // BUG FIX: signal end-of-stream and keep draining; the
                            // original broke out immediately and dropped the frames
                            // still buffered inside the decoder.
                            mediaCodec.queueInputBuffer(inIndex, 0, 0, 0,
                                    MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                            inputDone = true;
                        } else {
                            int size = mediaExtractor.readSampleData(buffer, 0);
                            byte[] content = new byte[buffer.remaining()];
                            buffer.get(content);
                            FileUtils.writeContent(content); // debug dump of the compressed frame
                            ByteBuffer inputBuffer = mediaCodec.getInputBuffer(inIndex);
                            inputBuffer.put(content);
                            mediaCodec.queueInputBuffer(inIndex, 0, size, sampleTimeUs,
                                    mediaExtractor.getSampleFlags());
                            mediaExtractor.advance();
                        }
                    }
                }
                int outIndex = mediaCodec.dequeueOutputBuffer(info, TIMEOUT);
                while (outIndex >= 0) {
                    if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                        outputDone = true;
                    }
                    ByteBuffer decodeOutputBuffer = mediaCodec.getOutputBuffer(outIndex);
                    writeChannel.write(decodeOutputBuffer); // raw PCM out
                    mediaCodec.releaseOutputBuffer(outIndex, false);
                    if (outputDone) {
                        break;
                    }
                    outIndex = mediaCodec.dequeueOutputBuffer(info, TIMEOUT);
                }
            }
        } finally {
            // BUG FIX: the original leaked all of these when decoding threw.
            writeChannel.close();
            mediaExtractor.release();
            mediaCodec.stop();
            mediaCodec.release();
        }
    }

    /**
     * Returns the index of the first audio track (when {@code audio} is true) or
     * the first video track (when false), or -1 when no such track exists.
     */
    public static int selectTrack(MediaExtractor extractor, boolean audio) {
        int numTracks = extractor.getTrackCount();
        for (int i = 0; i < numTracks; i++) {
            MediaFormat format = extractor.getTrackFormat(i);
            String mime = format.getString(MediaFormat.KEY_MIME);
            if (audio) {
                if (mime.startsWith("audio")) {
                    return i;
                }
            } else {
                if (mime.startsWith("video")) {
                    return i;
                }
            }
        }
        return -1;
    }
}