Get the device info
Get the recording's spectrum data
Draw the spectrum graph
Wrap it all up in loadDevices.js
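The module below wraps all of these steps: it asks for microphone permission, feeds the captured stream into both a MediaRecorder (to assemble the audio file) and an AnalyserNode (to expose the live waveform data), and reports everything back through callbacks.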
/**
 * Check whether recording is supported; returns the media scope, or false if not.
 */
const recordingSupport = () => {
  let scope = navigator.mediaDevices || {};
  if (!scope.getUserMedia) {
    // Fall back to the vendor-prefixed APIs on navigator itself
    // (note: these legacy APIs are callback-based, unlike mediaDevices.getUserMedia)
    scope = navigator;
    scope.getUserMedia || (scope.getUserMedia = scope.webkitGetUserMedia || scope.mozGetUserMedia || scope.msGetUserMedia);
  }
  if (!scope.getUserMedia) {
    return false;
  }
  return scope;
};
// Request microphone permission
export const getUserMediaPermission = () => {
  return new Promise((resolve, reject) => {
    const mediaDevices = recordingSupport();
    if (mediaDevices && mediaDevices.getUserMedia) {
      const constraints = { audio: true };
      mediaDevices.getUserMedia(constraints).then(resolve, reject);
    } else {
      reject(new Error("This browser does not support recording"));
    }
  });
};
function checkMime() {
  const types = [
    "audio/mpeg",
    "audio/webm",
    "audio/mp4",
    "audio/wav",
    "audio/ogg",
    "audio/flac",
    "audio/m4a",
    "audio/mp3",
    "audio/mpga",
    "audio/oga",
  ];
  let first;
  // Find the first MIME type the current browser can record to
  for (const type of types) {
    const supported = MediaRecorder.isTypeSupported(type);
    if (supported && !first) {
      console.log("Is " + type + " supported? Yes!");
      first = type;
    }
  }
  return first;
}
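// Note: in practice, Chromium-based browsers typically report "audio/webm" as the
// first supported type in the list above, while Safari reports "audio/mp4"; that
// is why the stop handler below builds the Blob from the recorder's own mimeType.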
let streams = [];
let stopDraw = false;
/**
 * Release resources: stop the draw loop and close every captured media track.
 */
export const devicesDispose = () => {
  console.log("devicesDispose - releasing resources");
  stopDraw = true;
  streams.forEach((stream) => {
    stream.getTracks().forEach((track) => track.stop());
  });
  streams = []; // drop the references so tracks are not stopped twice
};
// Return the AudioContext constructor, falling back to vendor-prefixed variants
export const getAudioContext = () =>
  window.AudioContext ||
  window.webkitAudioContext ||
  window.mozAudioContext ||
  window.msAudioContext;
export default function loadDevices(options = {}) {
  const { readover = () => { }, change = () => { }, stop = () => { } } = options;
  let analyser;
  let mediaRecorder;
  let dataArray;
  let audioChunks = [];
  stopDraw = false; // allow drawing again after a previous devicesDispose()
  try {
    const draw = () => {
      if (stopDraw) return;
      requestAnimationFrame(draw);
      analyser.getByteTimeDomainData(dataArray);
      change(dataArray);
    };
    const mimeType = checkMime();
    getUserMediaPermission().then((stream) => {
      streams.push(stream); // keep the stream so devicesDispose() can stop its tracks
      // Create the recorder
      mediaRecorder = new MediaRecorder(stream, { mimeType });
      // Collect audio chunks as data becomes available; they are merged into the final file
      mediaRecorder.addEventListener("dataavailable", (event) => {
        console.log("mediaRecorder-dataavailable:", event);
        audioChunks.push(event.data);
      });
      // // Fires when recording starts
      // mediaRecorder.addEventListener('start', () => {
      //   console.log("mediaRecorder-start:");
      //   audioChunks = [];
      // });
      // Fires when recording stops
      mediaRecorder.addEventListener("stop", () => {
        console.log("mediaRecorder-end:", audioChunks);
        // Use the recorder's own MIME type so the Blob matches the recorded data
        const audioBlob = new Blob(audioChunks, { type: mimeType });
        stop(audioBlob);
        // Clear the chunks for the next recording
        audioChunks = [];
      });
      // Read the audio data from the stream
      const audioContext = new (getAudioContext())();
      const source = audioContext.createMediaStreamSource(stream);
      // An AnalyserNode's getByteTimeDomainData exposes the waveform of the audio
      // as time-domain data, sampled each animation frame by draw() above
      analyser = audioContext.createAnalyser();
      // Set the FFT window size
      analyser.fftSize = 2048; // tweak this value to change the level of detail
      const bufferLength = analyser.frequencyBinCount;
      dataArray = new Uint8Array(bufferLength);
      // Pipe the stream into the analyser
      source.connect(analyser);
      draw();
      readover(mediaRecorder);
    }).catch((err) => {
      console.log("stream-error", err);
    });
  } catch (err) {
    console.log("mediaDevices-error", err);
  }
}
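The change callback above only hands out the raw Uint8Array filled by getByteTimeDomainData; the actual rendering is left to the caller. Below is a minimal sketch of a 2D-canvas waveform renderer for that array. The function name, the canvas argument, and the styling are illustrative, not part of the original module:

// Hypothetical helper, not part of loadDevices.js: renders one frame of the
// time-domain data (byte values 0-255, where 128 is silence) as a waveform line.
export function drawWaveform(canvas, dataArray) {
  const ctx = canvas.getContext("2d");
  const { width, height } = canvas;
  ctx.clearRect(0, 0, width, height);
  ctx.lineWidth = 2;
  ctx.strokeStyle = "#4a90d9"; // illustrative color
  ctx.beginPath();
  const sliceWidth = width / dataArray.length;
  let x = 0;
  for (let i = 0; i < dataArray.length; i++) {
    const v = dataArray[i] / 128.0; // normalize around the 128 midline
    const y = (v * height) / 2;
    if (i === 0) ctx.moveTo(x, y);
    else ctx.lineTo(x, y);
    x += sliceWidth;
  }
  ctx.lineTo(width, height / 2);
  ctx.stroke();
}

In the example that follows, the change callback could call drawWaveform(canvasEl, dataArray) on each frame instead of delegating to a child component.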
Example
import { ref, onMounted, onUnmounted } from "vue";
import loadDevices, {
  devicesDispose,
} from "../compositions/VerbalChat/loadDevices";
let mediaRecorder;
const speak = ref(false);
// Called when recording stops
const uploadAudio = (blob) => {
  // Follow-up handling of the recorded data, e.g. uploading it:
  // const formData = new FormData();
  // formData.append("file", blob);
  // POST the FormData to your API
};
// Drawing callback
const draw = ({ data }) => {
  // Delegate to a child component's draw method, passing the data along
  // verCanvas.value && verCanvas.value.draw({ data });
};
const btnClick = () => {
  if (!speak.value) {
    console.log("Start recording");
    speak.value = true;
    mediaRecorder && mediaRecorder.start();
  } else {
    console.log("Stop recording");
    speak.value = false;
    mediaRecorder && mediaRecorder.stop();
  }
};
onMounted(() => {
  loadDevices({
    readover: (r) => (mediaRecorder = r),
    change: (dataArray) => {
      if (speak.value) {
        // Only draw while actually recording
        draw({ data: dataArray });
      }
    },
    stop: (blob) => uploadAudio(blob),
  });
});
onUnmounted(() => devicesDispose());
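For completeness, a minimal template matching the script above might look like the sketch below. The VerCanvas child component and its verCanvas ref are assumptions inferred from the commented-out verCanvas.value.draw call; any canvas-bearing component would do:

<template>
  <!-- Hypothetical markup matching the script above -->
  <VerCanvas ref="verCanvas" />
  <button @click="btnClick">{{ speak ? "Stop recording" : "Start recording" }}</button>
</template>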