# Audio Analysis
If you want to extract time-domain, frequency-domain, or other data from audio, you need an AnalyserNode.
## The Analyser Node

Create an AnalyserNode from an AudioContext, set its FFT size, and allocate a byte buffer sized to `frequencyBinCount` to receive the data:

```js
const audioCtx = new AudioContext();
const analyser = audioCtx.createAnalyser();
analyser.fftSize = 1024;

// frequencyBinCount is always half of fftSize
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);

// Fill dataArray with the current frequency-domain data
analyser.getByteFrequencyData(dataArray);

// The same buffer can also receive time-domain (waveform) data
analyser.getByteTimeDomainData(dataArray);
```
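For the analyser to produce anything, it has to be wired into an audio graph. A minimal sketch, assuming an `<audio id="audio">` element exists on the page (the id is illustrative):

```js
// Assumed markup: an <audio id="audio"> element somewhere on the page
const audio = document.getElementById("audio");
const source = audioCtx.createMediaElementSource(audio);

// The analyser passes the signal through unchanged,
// so it can sit between the source and the speakers.
source.connect(analyser);
analyser.connect(audioCtx.destination);
```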
## Frequency-Domain Data

The frequency data can be drawn as a bar chart on a canvas, one bar per frequency bin:

```js
const canvas = document.getElementById("canvas");
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
const ctx = canvas.getContext("2d");

const WIDTH = canvas.width;
const HEIGHT = canvas.height;
const barWidth = (WIDTH / bufferLength) * 1.5;

function renderFrame() {
  requestAnimationFrame(renderFrame);
  analyser.getByteFrequencyData(dataArray);
  ctx.clearRect(0, 0, WIDTH, HEIGHT);

  for (let i = 0, x = 0; i < bufferLength; i++) {
    // Normalize the byte value (0-255) to 0-1
    const v = dataArray[i] / 255.0;
    const barHeight = v * HEIGHT;

    // Shift the color across the bins (CSS clamps channel values above 255)
    const r = barHeight + 25 * (i / bufferLength);
    const g = 250 * (i / bufferLength);
    const b = 50;
    ctx.fillStyle = "rgb(" + r + "," + g + "," + b + ")";
    ctx.fillRect(x, HEIGHT - barHeight, barWidth, barHeight);

    x += barWidth + 2;
  }
}
renderFrame();
```
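Each bin spans `sampleRate / fftSize` Hz, so a bar's index maps to an approximate frequency. A small sketch of that mapping (the `binToHz` helper is illustrative, not part of the Web Audio API):

```js
// Bin i is centered near i * sampleRate / fftSize Hz.
function binToHz(i) {
  return (i * audioCtx.sampleRate) / analyser.fftSize;
}

// e.g. with a 48000 Hz context and fftSize = 1024,
// binToHz(10) === 468.75
```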
## Time-Domain Data

The same analyser can also render the waveform itself: fetch time-domain data every frame and connect the samples with a line:

```js
const canvas = document.getElementById("canvas");
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
const ctx = canvas.getContext("2d");

const WIDTH = canvas.width;
const HEIGHT = canvas.height;

function renderFrame() {
  requestAnimationFrame(renderFrame);
  analyser.getByteTimeDomainData(dataArray); // fetch time-domain data
  ctx.clearRect(0, 0, WIDTH, HEIGHT);

  ctx.beginPath();
  ctx.lineWidth = 2;
  ctx.strokeStyle = "#000"; // black waveform

  // Horizontal width of each sample slice
  const sliceWidth = WIDTH / bufferLength;
  let x = 0;
  for (let i = 0; i < bufferLength; i++) {
    // Map the byte value (0-255) onto the canvas height
    const v = dataArray[i] / 255.0;
    const y = v * HEIGHT;
    if (i === 0) {
      ctx.moveTo(x, y);
    } else {
      ctx.lineTo(x, y);
    }
    x += sliceWidth;
  }
  ctx.lineTo(WIDTH, HEIGHT / 2); // finish the line at the vertical center
  ctx.stroke();
}
renderFrame();
```
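Time-domain data is also useful for non-visual measurements. A minimal sketch computing an RMS loudness estimate from the same buffer (the `getRms` helper is illustrative, not part of the API):

```js
// Root-mean-square amplitude of the current frame.
// Byte samples are centered at 128, so normalize to roughly -1..1 first.
function getRms() {
  analyser.getByteTimeDomainData(dataArray);
  let sumOfSquares = 0;
  for (let i = 0; i < bufferLength; i++) {
    const sample = (dataArray[i] - 128) / 128;
    sumOfSquares += sample * sample;
  }
  return Math.sqrt(sumOfSquares / bufferLength);
}
```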
## Lissajous Figures

A Lissajous curve (also known as a Lissajous figure or Bowditch curve) is the trajectory traced by composing two sinusoidal oscillations along mutually perpendicular axes, e.g. x = A·sin(at + δ), y = B·sin(bt). Plotting the left channel of a stereo signal against the right channel produces exactly this kind of figure.
Split the stereo signal with a ChannelSplitterNode, give each channel its own analyser, and plot one channel's samples against the other's:

```js
// Assumes `audio` (an <audio> element), `container`, and `canvas`
// already reference elements on the page.
let renderFrame;

const audioContext = new AudioContext();
const source = audioContext.createMediaElementSource(audio);
const splitter = audioContext.createChannelSplitter(2);
const analyserX = audioContext.createAnalyser();
const analyserY = audioContext.createAnalyser();
analyserX.fftSize = 512;
analyserY.fftSize = 512;

source.connect(audioContext.destination);
source.connect(splitter);
splitter.connect(analyserX, 0); // left channel drives the X axis
splitter.connect(analyserY, 1); // right channel drives the Y axis

const bufferLength = analyserX.frequencyBinCount;
const timeDomainX = new Uint8Array(bufferLength);
const timeDomainY = new Uint8Array(bufferLength);

function drawCanvas() {
  const rect = container.getBoundingClientRect();
  canvas.width = rect.width;
  canvas.height = rect.height;
  const ctx = canvas.getContext("2d");
  const WIDTH = canvas.width;
  const HEIGHT = canvas.height;

  return function renderFrame() {
    requestAnimationFrame(renderFrame);
    analyserX.getByteTimeDomainData(timeDomainX);
    analyserY.getByteTimeDomainData(timeDomainY);

    ctx.fillStyle = "#181818";
    ctx.fillRect(0, 0, WIDTH, HEIGHT);
    ctx.lineWidth = 2;
    ctx.strokeStyle = "#fff";
    ctx.beginPath();
    for (let i = 0; i < bufferLength; i++) {
      // Center the byte samples (0-255) on the middle of the canvas
      const x = ((timeDomainX[i] - 128) / 255) * WIDTH + WIDTH / 2;
      const y = ((timeDomainY[i] - 128) / 255) * HEIGHT + HEIGHT / 2;
      ctx.lineTo(x, y);
    }
    ctx.stroke();
  };
}

renderFrame = drawCanvas();
renderFrame();
```
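To see a classic Lissajous figure without a music file, you can drive the same splitter with two oscillators at different frequencies on the left and right channels. A minimal sketch under that assumption (the frequencies are arbitrary; you can skip connecting `source` when using this):

```js
// Left channel at 220 Hz and right at 330 Hz gives a 2:3 Lissajous figure.
const merger = audioContext.createChannelMerger(2);
const oscX = audioContext.createOscillator();
const oscY = audioContext.createOscillator();
oscX.frequency.value = 220;
oscY.frequency.value = 330;

oscX.connect(merger, 0, 0); // merger input 0 -> left channel
oscY.connect(merger, 0, 1); // merger input 1 -> right channel
merger.connect(splitter);

oscX.start();
oscY.start();
```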