1. Image Drawing Basics
1.1 The drawImage Method in Detail
Canvas provides the powerful drawImage() method for drawing images, and it supports several overloaded forms:
// Get the canvas and its 2D context
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');
// Create an image object
const img = new Image();
img.onload = function() {
// 1. Basic drawing - drawImage(image, x, y)
ctx.drawImage(img, 50, 50);
// 2. Scaled drawing - drawImage(image, x, y, width, height)
ctx.drawImage(img, 200, 50, 100, 100);
// 3. Source-cropped drawing - drawImage(image, sx, sy, sWidth, sHeight, dx, dy, dWidth, dHeight)
ctx.drawImage(
img,
0, 0, 100, 100, // source crop region
350, 50, 150, 150 // destination region
);
};
img.src = 'path/to/your/image.jpg';
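One caveat worth noting: once a cross-origin image is drawn, the canvas becomes "tainted" and later calls to getImageData() or toDataURL() (used heavily from Section 2 onward) throw a SecurityError. If the server sends CORS headers, setting crossOrigin before assigning src avoids this. A minimal sketch (the URL below is a placeholder):
// Request the image with CORS so the canvas is not tainted for getImageData()
const corsImg = new Image();
corsImg.crossOrigin = 'anonymous'; // the server must respond with Access-Control-Allow-Origin
corsImg.onload = () => ctx.drawImage(corsImg, 0, 0);
corsImg.src = 'https://example.com/image.jpg'; // placeholder URL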
1.2 Image Loading Management
// Image loader class
class ImageLoader {
constructor() {
this.images = new Map();
this.loadPromises = new Map();
}
load(name, src) {
if (this.images.has(name)) {
return Promise.resolve(this.images.get(name));
}
if (this.loadPromises.has(name)) {
return this.loadPromises.get(name);
}
const promise = new Promise((resolve, reject) => {
const img = new Image();
img.onload = () => {
this.images.set(name, img);
this.loadPromises.delete(name);
resolve(img);
};
img.onerror = () => {
this.loadPromises.delete(name);
reject(new Error(`Failed to load image: ${src}`));
};
img.src = src;
});
this.loadPromises.set(name, promise);
return promise;
}
get(name) {
return this.images.get(name);
}
async loadMultiple(imageMap) {
const promises = Object.entries(imageMap).map(([name, src]) =>
this.load(name, src)
);
return Promise.all(promises);
}
}
// Usage example
const imageLoader = new ImageLoader();
// Load a single image
imageLoader.load('player', 'images/player.png')
.then(img => {
ctx.drawImage(img, 100, 100);
})
.catch(error => {
console.error('Image loading failed:', error);
});
// Load multiple images at once
const imagesToLoad = {
'background': 'images/background.jpg',
'sprite1': 'images/sprite1.png',
'sprite2': 'images/sprite2.png'
};
imageLoader.loadMultiple(imagesToLoad)
.then(() => {
console.log('All images loaded');
// Start drawing
drawScene();
})
.catch(error => {
console.error('Image loading failed:', error);
});
function drawScene() {
const bg = imageLoader.get('background');
const sprite1 = imageLoader.get('sprite1');
const sprite2 = imageLoader.get('sprite2');
ctx.drawImage(bg, 0, 0, canvas.width, canvas.height);
ctx.drawImage(sprite1, 50, 200);
ctx.drawImage(sprite2, 200, 200);
}
1.3 Image Tiling and Patterns
// Tiled image drawing
function tileImage(img, tileWidth, tileHeight) {
const cols = Math.ceil(canvas.width / tileWidth);
const rows = Math.ceil(canvas.height / tileHeight);
for (let row = 0; row < rows; row++) {
for (let col = 0; col < cols; col++) {
const x = col * tileWidth;
const y = row * tileHeight;
ctx.drawImage(img, x, y, tileWidth, tileHeight);
}
}
}
// Fill using a pattern
function createImagePattern(img, repetition = 'repeat') {
const pattern = ctx.createPattern(img, repetition);
// repetition: 'repeat', 'repeat-x', 'repeat-y', 'no-repeat'
ctx.fillStyle = pattern;
ctx.fillRect(0, 0, canvas.width, canvas.height);
}
// Example: create a textured background
const textureImg = new Image();
textureImg.onload = function() {
// Method 1: manual tiling
tileImage(textureImg, 64, 64);
// Method 2: use a pattern
// createImagePattern(textureImg, 'repeat');
};
textureImg.src = 'images/texture.png';
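Patterns themselves can also be transformed: CanvasPattern.setTransform() accepts a DOMMatrix, so a tile can be rotated or scaled without redrawing the source image. A small sketch, assuming textureImg has already loaded:
// Rotate and shrink the pattern tile before filling with it
const transformedPattern = ctx.createPattern(textureImg, 'repeat');
transformedPattern.setTransform(new DOMMatrix().rotate(45).scale(0.5));
ctx.fillStyle = transformedPattern;
ctx.fillRect(0, 0, canvas.width, canvas.height);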
2. Pixel Data Manipulation
2.1 Getting and Setting Pixel Data
// Get pixel data
function getPixelData(x, y, width, height) {
return ctx.getImageData(x, y, width, height);
}
// Set pixel data
function setPixelData(imageData, x, y) {
ctx.putImageData(imageData, x, y);
}
// Pixel manipulation example
function pixelManipulationDemo() {
// Draw a gradient rectangle
const gradient = ctx.createLinearGradient(0, 0, 200, 0);
gradient.addColorStop(0, 'red');
gradient.addColorStop(1, 'blue');
ctx.fillStyle = gradient;
ctx.fillRect(50, 50, 200, 100);
// Get the pixel data
const imageData = ctx.getImageData(50, 50, 200, 100);
const data = imageData.data;
console.log('Pixel data length:', data.length); // width * height * 4
console.log('First pixel RGBA:', data[0], data[1], data[2], data[3]);
// Modify the pixel data (invert effect)
for (let i = 0; i < data.length; i += 4) {
data[i] = 255 - data[i]; // Red
data[i + 1] = 255 - data[i + 1]; // Green
data[i + 2] = 255 - data[i + 2]; // Blue
// data[i + 3] is alpha; leave it unchanged
}
// Draw the modified data at a new position
ctx.putImageData(imageData, 300, 50);
}
pixelManipulationDemo();
2.2 Pixel-Level Image Processing
// Pixel processing utility class
class PixelProcessor {
constructor(ctx) {
this.ctx = ctx;
}
// Get the pixel color at a given position
getPixel(x, y) {
const imageData = this.ctx.getImageData(x, y, 1, 1);
const data = imageData.data;
return {
r: data[0],
g: data[1],
b: data[2],
a: data[3]
};
}
// Set the pixel color at a given position
setPixel(x, y, r, g, b, a = 255) {
const imageData = this.ctx.createImageData(1, 1);
const data = imageData.data;
data[0] = r;
data[1] = g;
data[2] = b;
data[3] = a;
this.ctx.putImageData(imageData, x, y);
}
// Apply a filter to a given region
applyFilter(x, y, width, height, filterFunction) {
const imageData = this.ctx.getImageData(x, y, width, height);
const data = imageData.data;
for (let i = 0; i < data.length; i += 4) {
const pixel = {
r: data[i],
g: data[i + 1],
b: data[i + 2],
a: data[i + 3]
};
const newPixel = filterFunction(pixel);
data[i] = newPixel.r;
data[i + 1] = newPixel.g;
data[i + 2] = newPixel.b;
data[i + 3] = newPixel.a;
}
this.ctx.putImageData(imageData, x, y);
}
// Generate noise
generateNoise(x, y, width, height, intensity = 50) {
const imageData = this.ctx.createImageData(width, height);
const data = imageData.data;
for (let i = 0; i < data.length; i += 4) {
const noise = Math.random() * intensity;
data[i] = noise; // Red
data[i + 1] = noise; // Green
data[i + 2] = noise; // Blue
data[i + 3] = 255; // Alpha
}
this.ctx.putImageData(imageData, x, y);
}
}
// Usage example
const processor = new PixelProcessor(ctx);
// Generate noise
processor.generateNoise(50, 200, 200, 100, 100);
// Read a specific pixel
const pixel = processor.getPixel(100, 250);
console.log('Pixel color:', pixel);
// Set a specific pixel
processor.setPixel(150, 250, 255, 0, 0); // red pixel
3. Image Filter Effects
3.1 Basic Filters
// Filter effects class
class ImageFilters {
// Grayscale filter
static grayscale(pixel) {
const gray = pixel.r * 0.299 + pixel.g * 0.587 + pixel.b * 0.114;
return { r: gray, g: gray, b: gray, a: pixel.a };
}
// Invert filter
static invert(pixel) {
return {
r: 255 - pixel.r,
g: 255 - pixel.g,
b: 255 - pixel.b,
a: pixel.a
};
}
// Brightness adjustment
static brightness(pixel, amount) {
return {
r: Math.min(255, Math.max(0, pixel.r + amount)),
g: Math.min(255, Math.max(0, pixel.g + amount)),
b: Math.min(255, Math.max(0, pixel.b + amount)),
a: pixel.a
};
}
// Contrast adjustment
static contrast(pixel, amount) {
const factor = (259 * (amount + 255)) / (255 * (259 - amount));
return {
r: Math.min(255, Math.max(0, factor * (pixel.r - 128) + 128)),
g: Math.min(255, Math.max(0, factor * (pixel.g - 128) + 128)),
b: Math.min(255, Math.max(0, factor * (pixel.b - 128) + 128)),
a: pixel.a
};
}
// Hue adjustment
static hue(pixel, amount) {
const { h, s, l } = ImageFilters.rgbToHsl(pixel.r, pixel.g, pixel.b);
const newH = (h + amount) % 360;
const { r, g, b } = ImageFilters.hslToRgb(newH, s, l);
return { r, g, b, a: pixel.a };
}
// Saturation adjustment
static saturation(pixel, amount) {
const { h, s, l } = ImageFilters.rgbToHsl(pixel.r, pixel.g, pixel.b);
const newS = Math.min(1, Math.max(0, s + amount));
const { r, g, b } = ImageFilters.hslToRgb(h, newS, l);
return { r, g, b, a: pixel.a };
}
// RGB to HSL
static rgbToHsl(r, g, b) {
r /= 255;
g /= 255;
b /= 255;
const max = Math.max(r, g, b);
const min = Math.min(r, g, b);
let h, s, l = (max + min) / 2;
if (max === min) {
h = s = 0; // achromatic
} else {
const d = max - min;
s = l > 0.5 ? d / (2 - max - min) : d / (max + min);
switch (max) {
case r: h = (g - b) / d + (g < b ? 6 : 0); break;
case g: h = (b - r) / d + 2; break;
case b: h = (r - g) / d + 4; break;
}
h /= 6;
}
return { h: h * 360, s, l };
}
// HSL to RGB
static hslToRgb(h, s, l) {
h /= 360;
const hue2rgb = (p, q, t) => {
if (t < 0) t += 1;
if (t > 1) t -= 1;
if (t < 1/6) return p + (q - p) * 6 * t;
if (t < 1/2) return q;
if (t < 2/3) return p + (q - p) * (2/3 - t) * 6;
return p;
};
let r, g, b;
if (s === 0) {
r = g = b = l; // achromatic
} else {
const q = l < 0.5 ? l * (1 + s) : l + s - l * s;
const p = 2 * l - q;
r = hue2rgb(p, q, h + 1/3);
g = hue2rgb(p, q, h);
b = hue2rgb(p, q, h - 1/3);
}
return {
r: Math.round(r * 255),
g: Math.round(g * 255),
b: Math.round(b * 255)
};
}
}
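Other per-pixel filters can follow the same interface. As an extra example (not part of the class above), a sepia filter using the commonly cited sepia weight matrix:
// Sepia filter: weighted mix of the original channels, clamped to 255
function sepia(pixel) {
  const { r, g, b, a } = pixel;
  return {
    r: Math.min(255, r * 0.393 + g * 0.769 + b * 0.189),
    g: Math.min(255, r * 0.349 + g * 0.686 + b * 0.168),
    b: Math.min(255, r * 0.272 + g * 0.534 + b * 0.131),
    a
  };
}
// It can be passed straight to PixelProcessor.applyFilter, e.g. processor.applyFilter(50, 300, 200, 100, sepia);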
// Filter application demo
function applyFiltersDemo() {
// Draw the original image
const gradient = ctx.createRadialGradient(150, 350, 0, 150, 350, 100);
gradient.addColorStop(0, 'yellow');
gradient.addColorStop(0.5, 'orange');
gradient.addColorStop(1, 'red');
ctx.fillStyle = gradient;
ctx.fillRect(50, 300, 200, 100);
const processor = new PixelProcessor(ctx);
// Apply the grayscale filter
processor.applyFilter(300, 300, 200, 100, ImageFilters.grayscale);
// Apply the invert filter
processor.applyFilter(550, 300, 200, 100, ImageFilters.invert);
// Apply the brightness filter
processor.applyFilter(50, 450, 200, 100, (pixel) =>
ImageFilters.brightness(pixel, 50)
);
// Apply the contrast filter
processor.applyFilter(300, 450, 200, 100, (pixel) =>
ImageFilters.contrast(pixel, 50)
);
}
applyFiltersDemo();
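For simple cases the 2D context also exposes a built-in filter property that accepts CSS filter functions and applies them at draw time, avoiding manual pixel loops entirely (browser support varies, so treat it as an optional shortcut rather than a replacement for the techniques above):
// Built-in CSS-style filters affect everything drawn while the filter is set
ctx.save();
ctx.filter = 'grayscale(100%) brightness(1.2)';
ctx.fillRect(550, 450, 200, 100); // example coordinates alongside the demo above
ctx.restore(); // restore() also resets ctx.filter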
3.2 Convolution Filters
// Convolution filter class
class ConvolutionFilters {
// Apply a convolution kernel
static applyKernel(imageData, kernel, divisor = 1, offset = 0) {
const data = imageData.data;
const width = imageData.width;
const height = imageData.height;
const output = new Uint8ClampedArray(data.length);
const kernelSize = Math.sqrt(kernel.length);
const half = Math.floor(kernelSize / 2);
for (let y = 0; y < height; y++) {
for (let x = 0; x < width; x++) {
let r = 0, g = 0, b = 0;
for (let ky = 0; ky < kernelSize; ky++) {
for (let kx = 0; kx < kernelSize; kx++) {
const px = x + kx - half;
const py = y + ky - half;
if (px >= 0 && px < width && py >= 0 && py < height) {
const idx = (py * width + px) * 4;
const weight = kernel[ky * kernelSize + kx];
r += data[idx] * weight;
g += data[idx + 1] * weight;
b += data[idx + 2] * weight;
}
}
}
const idx = (y * width + x) * 4;
output[idx] = Math.min(255, Math.max(0, r / divisor + offset));
output[idx + 1] = Math.min(255, Math.max(0, g / divisor + offset));
output[idx + 2] = Math.min(255, Math.max(0, b / divisor + offset));
// Keep the original alpha: kernels whose weights sum to 0 (e.g. edge detection) would otherwise make the result fully transparent
output[idx + 3] = data[idx + 3];
}
}
return new ImageData(output, width, height);
}
// Blur filter (box blur)
static blur(imageData) {
const kernel = [
1, 1, 1,
1, 1, 1,
1, 1, 1
];
return ConvolutionFilters.applyKernel(imageData, kernel, 9);
}
// Sharpen filter
static sharpen(imageData) {
const kernel = [
0, -1, 0,
-1, 5, -1,
0, -1, 0
];
return ConvolutionFilters.applyKernel(imageData, kernel);
}
// Edge detection
static edgeDetection(imageData) {
const kernel = [
-1, -1, -1,
-1, 8, -1,
-1, -1, -1
];
return ConvolutionFilters.applyKernel(imageData, kernel);
}
// Emboss effect
static emboss(imageData) {
const kernel = [
-2, -1, 0,
-1, 1, 1,
0, 1, 2
];
return ConvolutionFilters.applyKernel(imageData, kernel, 1, 128);
}
}
// Convolution filter demo
function convolutionDemo() {
// Draw a test image
ctx.fillStyle = 'blue';
ctx.fillRect(50, 600, 100, 100);
ctx.fillStyle = 'red';
ctx.fillRect(100, 650, 100, 100);
ctx.fillStyle = 'green';
ctx.fillRect(75, 625, 100, 100);
const originalData = ctx.getImageData(50, 600, 150, 150);
// Apply blur
const blurred = ConvolutionFilters.blur(originalData);
ctx.putImageData(blurred, 250, 600);
// Apply sharpen
const sharpened = ConvolutionFilters.sharpen(originalData);
ctx.putImageData(sharpened, 450, 600);
// Apply edge detection
const edges = ConvolutionFilters.edgeDetection(originalData);
ctx.putImageData(edges, 650, 600);
// Apply emboss
const embossed = ConvolutionFilters.emboss(originalData);
ctx.putImageData(embossed, 850, 600);
}
convolutionDemo();
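The blur above uses a box kernel that weights all neighbors equally. A 3×3 Gaussian approximation gives a smoother falloff and plugs into the same applyKernel helper:
// 3x3 Gaussian approximation: the center weighs most, the corners least (weights sum to 16)
function gaussianBlur(imageData) {
  const kernel = [
    1, 2, 1,
    2, 4, 2,
    1, 2, 1
  ];
  return ConvolutionFilters.applyKernel(imageData, kernel, 16);
}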
4. Image Compositing and Blending
4.1 Global Composite Operations
// Composite operation demo
function compositeOperationsDemo() {
const operations = [
'source-over', 'source-in', 'source-out', 'source-atop',
'destination-over', 'destination-in', 'destination-out', 'destination-atop',
'lighter', 'copy', 'xor', 'multiply', 'screen', 'overlay',
'darken', 'lighten', 'color-dodge', 'color-burn',
'hard-light', 'soft-light', 'difference', 'exclusion'
];
const cols = 6;
const cellWidth = 120;
const cellHeight = 100;
operations.forEach((operation, index) => {
const x = (index % cols) * cellWidth + 50;
const y = Math.floor(index / cols) * cellHeight + 800;
// Save the current state
ctx.save();
// Set the clip region
ctx.beginPath();
ctx.rect(x, y, cellWidth - 10, cellHeight - 10);
ctx.clip();
// Draw the first shape (destination)
ctx.fillStyle = 'rgba(255, 0, 0, 0.8)';
ctx.fillRect(x + 10, y + 10, 50, 50);
// Set the composite operation
ctx.globalCompositeOperation = operation;
// Draw the second shape (source)
ctx.fillStyle = 'rgba(0, 0, 255, 0.8)';
ctx.fillRect(x + 30, y + 30, 50, 50);
// Reset the composite operation
ctx.globalCompositeOperation = 'source-over';
// Draw the label
ctx.fillStyle = 'black';
ctx.font = '10px Arial';
ctx.fillText(operation, x + 5, y + 85);
// Restore the state
ctx.restore();
});
}
compositeOperationsDemo();
4.2 Image Blending Algorithms
// Image blending class
class ImageBlending {
// Normal blend
static normal(base, overlay, alpha) {
return {
r: overlay.r * alpha + base.r * (1 - alpha),
g: overlay.g * alpha + base.g * (1 - alpha),
b: overlay.b * alpha + base.b * (1 - alpha),
a: Math.min(255, base.a + overlay.a * alpha)
};
}
// Multiply
static multiply(base, overlay) {
return {
r: (base.r * overlay.r) / 255,
g: (base.g * overlay.g) / 255,
b: (base.b * overlay.b) / 255,
a: base.a
};
}
// Screen
static screen(base, overlay) {
return {
r: 255 - ((255 - base.r) * (255 - overlay.r)) / 255,
g: 255 - ((255 - base.g) * (255 - overlay.g)) / 255,
b: 255 - ((255 - base.b) * (255 - overlay.b)) / 255,
a: base.a
};
}
// Overlay
static overlay(base, overlay) {
const blendChannel = (base, overlay) => {
if (base < 128) {
return (2 * base * overlay) / 255;
} else {
return 255 - (2 * (255 - base) * (255 - overlay)) / 255;
}
};
return {
r: blendChannel(base.r, overlay.r),
g: blendChannel(base.g, overlay.g),
b: blendChannel(base.b, overlay.b),
a: base.a
};
}
// Soft light
static softLight(base, overlay) {
const blendChannel = (base, overlay) => {
const a = base / 255;
const b = overlay / 255;
let result;
if (b < 0.5) {
result = 2 * a * b + a * a * (1 - 2 * b);
} else {
result = 2 * a * (1 - b) + Math.sqrt(a) * (2 * b - 1);
}
return result * 255;
};
return {
r: blendChannel(base.r, overlay.r),
g: blendChannel(base.g, overlay.g),
b: blendChannel(base.b, overlay.b),
a: base.a
};
}
// Apply a blend mode to image data
static applyBlendMode(baseImageData, overlayImageData, blendFunction, alpha = 1) {
const baseData = baseImageData.data;
const overlayData = overlayImageData.data;
const result = new Uint8ClampedArray(baseData.length);
for (let i = 0; i < baseData.length; i += 4) {
const base = {
r: baseData[i],
g: baseData[i + 1],
b: baseData[i + 2],
a: baseData[i + 3]
};
const overlay = {
r: overlayData[i],
g: overlayData[i + 1],
b: overlayData[i + 2],
a: overlayData[i + 3]
};
const blended = blendFunction(base, overlay);
result[i] = blended.r * alpha + base.r * (1 - alpha);
result[i + 1] = blended.g * alpha + base.g * (1 - alpha);
result[i + 2] = blended.b * alpha + base.b * (1 - alpha);
result[i + 3] = blended.a;
}
return new ImageData(result, baseImageData.width, baseImageData.height);
}
}
// Blend mode demo
function blendingDemo() {
// Create the base image
const gradient1 = ctx.createLinearGradient(0, 0, 200, 0);
gradient1.addColorStop(0, 'red');
gradient1.addColorStop(1, 'yellow');
ctx.fillStyle = gradient1;
ctx.fillRect(50, 1200, 200, 100);
const baseImageData = ctx.getImageData(50, 1200, 200, 100);
// Create the overlay image
const gradient2 = ctx.createLinearGradient(0, 0, 0, 100);
gradient2.addColorStop(0, 'blue');
gradient2.addColorStop(1, 'green');
ctx.fillStyle = gradient2;
ctx.fillRect(300, 1200, 200, 100);
const overlayImageData = ctx.getImageData(300, 1200, 200, 100);
// Apply different blend modes
const blendModes = [
{ name: 'Multiply', func: ImageBlending.multiply },
{ name: 'Screen', func: ImageBlending.screen },
{ name: 'Overlay', func: ImageBlending.overlay },
{ name: 'Soft Light', func: ImageBlending.softLight }
];
blendModes.forEach((mode, index) => {
const x = 550 + (index % 2) * 220;
const y = 1200 + Math.floor(index / 2) * 120;
const blended = ImageBlending.applyBlendMode(
baseImageData,
overlayImageData,
mode.func
);
ctx.putImageData(blended, x, y);
// Add a label
ctx.fillStyle = 'black';
ctx.font = '14px Arial';
ctx.fillText(mode.name, x, y + 115);
});
}
blendingDemo();
5. Image Transformation and Distortion
5.1 Basic Image Transformations
// Image transform class
class ImageTransform {
constructor(ctx) {
this.ctx = ctx;
}
// Flip an image
flipImage(img, x, y, flipX = false, flipY = false) {
this.ctx.save();
this.ctx.translate(x + img.width / 2, y + img.height / 2);
this.ctx.scale(flipX ? -1 : 1, flipY ? -1 : 1);
this.ctx.drawImage(img, -img.width / 2, -img.height / 2);
this.ctx.restore();
}
// Rotate an image
rotateImage(img, x, y, angle) {
this.ctx.save();
this.ctx.translate(x + img.width / 2, y + img.height / 2);
this.ctx.rotate(angle);
this.ctx.drawImage(img, -img.width / 2, -img.height / 2);
this.ctx.restore();
}
// Skew an image
skewImage(img, x, y, skewX = 0, skewY = 0) {
this.ctx.save();
this.ctx.setTransform(
1, Math.tan(skewY),
Math.tan(skewX), 1,
x, y
);
this.ctx.drawImage(img, 0, 0);
this.ctx.restore();
}
// Perspective transform (simplified version)
perspectiveImage(img, x, y, perspective = 0.5) {
this.ctx.save();
// Create a trapezoid effect
const width = img.width;
const height = img.height;
const topWidth = width * perspective;
const offset = (width - topWidth) / 2;
this.ctx.beginPath();
this.ctx.moveTo(x + offset, y);
this.ctx.lineTo(x + offset + topWidth, y);
this.ctx.lineTo(x + width, y + height);
this.ctx.lineTo(x, y + height);
this.ctx.closePath();
this.ctx.clip();
// Use a transform matrix
this.ctx.setTransform(
perspective, 0,
(1 - perspective) / height, 1,
x + offset, y
);
this.ctx.drawImage(img, 0, 0, width, height);
this.ctx.restore();
}
}
// Transform demo
function transformDemo() {
// Create a test image
const testCanvas = document.createElement('canvas');
testCanvas.width = 100;
testCanvas.height = 100;
const testCtx = testCanvas.getContext('2d');
// Draw a test pattern
testCtx.fillStyle = 'red';
testCtx.fillRect(0, 0, 50, 50);
testCtx.fillStyle = 'blue';
testCtx.fillRect(50, 0, 50, 50);
testCtx.fillStyle = 'green';
testCtx.fillRect(0, 50, 50, 50);
testCtx.fillStyle = 'yellow';
testCtx.fillRect(50, 50, 50, 50);
const transformer = new ImageTransform(ctx);
// Original image
ctx.drawImage(testCanvas, 50, 1400);
// Horizontal flip
transformer.flipImage(testCanvas, 200, 1400, true, false);
// Vertical flip
transformer.flipImage(testCanvas, 350, 1400, false, true);
// Rotate 45 degrees
transformer.rotateImage(testCanvas, 500, 1400, Math.PI / 4);
// Skew transform
transformer.skewImage(testCanvas, 650, 1400, Math.PI / 6, 0);
// Perspective transform
transformer.perspectiveImage(testCanvas, 800, 1400, 0.6);
}
transformDemo();
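Because flipping, rotating, and skewing all go through drawImage, the browser resamples the image; imageSmoothingEnabled controls whether that resampling is interpolated or kept pixelated, which matters when scaling up pixel-art sprites. A short sketch (img stands for any loaded image or canvas source):
// Disable smoothing to keep hard pixel edges when scaling up
ctx.imageSmoothingEnabled = false;
ctx.drawImage(img, 950, 1400, 200, 200); // drawn larger than its natural size, without interpolation
ctx.imageSmoothingEnabled = true; // re-enable for subsequent drawing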
5.2 Image Distortion Effects
// Image warp class
class ImageWarp {
// Wave distortion
static wave(imageData, amplitude = 20, frequency = 0.02, direction = 'horizontal') {
const data = imageData.data;
const width = imageData.width;
const height = imageData.height;
const output = new Uint8ClampedArray(data.length);
for (let y = 0; y < height; y++) {
for (let x = 0; x < width; x++) {
let sourceX = x;
let sourceY = y;
if (direction === 'horizontal') {
sourceX = x + Math.sin(y * frequency) * amplitude;
} else {
sourceY = y + Math.sin(x * frequency) * amplitude;
}
sourceX = Math.max(0, Math.min(width - 1, Math.round(sourceX)));
sourceY = Math.max(0, Math.min(height - 1, Math.round(sourceY)));
const sourceIdx = (sourceY * width + sourceX) * 4;
const targetIdx = (y * width + x) * 4;
output[targetIdx] = data[sourceIdx];
output[targetIdx + 1] = data[sourceIdx + 1];
output[targetIdx + 2] = data[sourceIdx + 2];
output[targetIdx + 3] = data[sourceIdx + 3];
}
}
return new ImageData(output, width, height);
}
// Swirl distortion
static swirl(imageData, centerX, centerY, radius, angle) {
const data = imageData.data;
const width = imageData.width;
const height = imageData.height;
const output = new Uint8ClampedArray(data.length);
for (let y = 0; y < height; y++) {
for (let x = 0; x < width; x++) {
const dx = x - centerX;
const dy = y - centerY;
const distance = Math.sqrt(dx * dx + dy * dy);
if (distance < radius) {
const factor = (radius - distance) / radius;
const rotation = factor * angle;
const cos = Math.cos(rotation);
const sin = Math.sin(rotation);
const sourceX = Math.round(centerX + dx * cos - dy * sin);
const sourceY = Math.round(centerY + dx * sin + dy * cos);
if (sourceX >= 0 && sourceX < width && sourceY >= 0 && sourceY < height) {
const sourceIdx = (sourceY * width + sourceX) * 4;
const targetIdx = (y * width + x) * 4;
output[targetIdx] = data[sourceIdx];
output[targetIdx + 1] = data[sourceIdx + 1];
output[targetIdx + 2] = data[sourceIdx + 2];
output[targetIdx + 3] = data[sourceIdx + 3];
}
} else {
const sourceIdx = (y * width + x) * 4;
output[sourceIdx] = data[sourceIdx];
output[sourceIdx + 1] = data[sourceIdx + 1];
output[sourceIdx + 2] = data[sourceIdx + 2];
output[sourceIdx + 3] = data[sourceIdx + 3];
}
}
}
return new ImageData(output, width, height);
}
// Spherical distortion
static sphere(imageData, centerX, centerY, radius) {
const data = imageData.data;
const width = imageData.width;
const height = imageData.height;
const output = new Uint8ClampedArray(data.length);
for (let y = 0; y < height; y++) {
for (let x = 0; x < width; x++) {
const dx = x - centerX;
const dy = y - centerY;
const distance = Math.sqrt(dx * dx + dy * dy);
if (distance < radius) {
const z = Math.sqrt(radius * radius - distance * distance);
const factor = Math.atan2(distance, z) / (Math.PI / 2);
const sourceX = Math.round(centerX + dx * factor);
const sourceY = Math.round(centerY + dy * factor);
if (sourceX >= 0 && sourceX < width && sourceY >= 0 && sourceY < height) {
const sourceIdx = (sourceY * width + sourceX) * 4;
const targetIdx = (y * width + x) * 4;
output[targetIdx] = data[sourceIdx];
output[targetIdx + 1] = data[sourceIdx + 1];
output[targetIdx + 2] = data[sourceIdx + 2];
output[targetIdx + 3] = data[sourceIdx + 3];
}
} else {
const sourceIdx = (y * width + x) * 4;
output[sourceIdx] = data[sourceIdx];
output[sourceIdx + 1] = data[sourceIdx + 1];
output[sourceIdx + 2] = data[sourceIdx + 2];
output[sourceIdx + 3] = data[sourceIdx + 3];
}
}
}
return new ImageData(output, width, height);
}
}
// Warp effect demo
function warpDemo() {
// Create a test image
const gradient = ctx.createLinearGradient(0, 0, 200, 200);
gradient.addColorStop(0, 'red');
gradient.addColorStop(0.5, 'yellow');
gradient.addColorStop(1, 'blue');
ctx.fillStyle = gradient;
ctx.fillRect(50, 1550, 200, 200);
const originalData = ctx.getImageData(50, 1550, 200, 200);
// Wave distortion
const waved = ImageWarp.wave(originalData, 10, 0.05, 'horizontal');
ctx.putImageData(waved, 300, 1550);
// Swirl distortion
const swirled = ImageWarp.swirl(originalData, 100, 100, 80, Math.PI);
ctx.putImageData(swirled, 550, 1550);
// Spherical distortion
const sphered = ImageWarp.sphere(originalData, 100, 100, 90);
ctx.putImageData(sphered, 800, 1550);
}
warpDemo();
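The warps above round the source coordinates to the nearest pixel, which can produce jagged results. A bilinear sampling helper (a sketch, not part of the ImageWarp class above) blends the four surrounding pixels and can replace the Math.round lookups:
// Sample imageData at fractional coordinates by blending the 4 surrounding pixels (bilinear interpolation)
function sampleBilinear(imageData, x, y) {
  const { data, width, height } = imageData;
  // Clamp to the valid range, then split into integer and fractional parts
  x = Math.max(0, Math.min(width - 1, x));
  y = Math.max(0, Math.min(height - 1, y));
  const x0 = Math.floor(x), y0 = Math.floor(y);
  const x1 = Math.min(width - 1, x0 + 1), y1 = Math.min(height - 1, y0 + 1);
  const fx = x - x0, fy = y - y0;
  const idx = (xx, yy) => (yy * width + xx) * 4;
  const out = [0, 0, 0, 0];
  for (let c = 0; c < 4; c++) {
    const top = data[idx(x0, y0) + c] * (1 - fx) + data[idx(x1, y0) + c] * fx;
    const bottom = data[idx(x0, y1) + c] * (1 - fx) + data[idx(x1, y1) + c] * fx;
    out[c] = top * (1 - fy) + bottom * fy;
  }
  return out; // [r, g, b, a]
}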
6. Real-Time Image Processing
6.1 Camera Image Processing
// Camera image processing class
class CameraProcessor {
constructor(videoElement, canvasElement) {
this.video = videoElement;
this.canvas = canvasElement;
this.ctx = canvasElement.getContext('2d');
this.isProcessing = false;
this.filters = [];
}
async initialize() {
try {
const stream = await navigator.mediaDevices.getUserMedia({
video: { width: 640, height: 480 }
});
this.video.srcObject = stream;
this.video.play();
this.video.addEventListener('loadedmetadata', () => {
this.canvas.width = this.video.videoWidth;
this.canvas.height = this.video.videoHeight;
});
return true;
} catch (error) {
console.error('Camera initialization failed:', error);
return false;
}
}
addFilter(filterFunction) {
this.filters.push(filterFunction);
}
removeFilter(filterFunction) {
const index = this.filters.indexOf(filterFunction);
if (index > -1) {
this.filters.splice(index, 1);
}
}
clearFilters() {
this.filters = [];
}
startProcessing() {
this.isProcessing = true;
this.processFrame();
}
stopProcessing() {
this.isProcessing = false;
}
processFrame() {
if (!this.isProcessing) return;
// Draw the video frame
this.ctx.drawImage(this.video, 0, 0, this.canvas.width, this.canvas.height);
// Apply the filters
if (this.filters.length > 0) {
const imageData = this.ctx.getImageData(0, 0, this.canvas.width, this.canvas.height);
let processedData = imageData;
this.filters.forEach(filter => {
processedData = filter(processedData);
});
this.ctx.putImageData(processedData, 0, 0);
}
requestAnimationFrame(() => this.processFrame());
}
captureFrame() {
return this.canvas.toDataURL('image/png');
}
}
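The filters registered on CameraProcessor receive and return a whole ImageData object, whereas the ImageFilters methods from Section 3 work on a single pixel. The small adapter below (a helper introduced here for illustration, not part of any class above) bridges the two and is used in the usage example that follows:
// Adapt a per-pixel filter (pixel => pixel) into an ImageData => ImageData filter
function applyPerPixel(pixelFilter) {
  return (imageData) => {
    const data = imageData.data;
    for (let i = 0; i < data.length; i += 4) {
      const out = pixelFilter({ r: data[i], g: data[i + 1], b: data[i + 2], a: data[i + 3] });
      data[i] = out.r;
      data[i + 1] = out.g;
      data[i + 2] = out.b;
      data[i + 3] = out.a;
    }
    return imageData;
  };
}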
// Usage example (requires <video> and <canvas> elements in the HTML)
/*
const video = document.getElementById('video');
const canvas = document.getElementById('canvas');
const processor = new CameraProcessor(video, canvas);
processor.initialize().then(success => {
if (success) {
// Add real-time filters. CameraProcessor filters receive ImageData, while the ImageFilters
// methods work on single pixels, so wrap them with the applyPerPixel adapter defined above.
processor.addFilter(applyPerPixel(ImageFilters.grayscale));
processor.addFilter(applyPerPixel((pixel) => ImageFilters.brightness(pixel, 20)));
processor.startProcessing();
}
});
*/
6.2 Performance Optimization Techniques
// Image processing performance optimization
class OptimizedImageProcessor {
constructor() {
this.workerPool = [];
this.maxWorkers = navigator.hardwareConcurrency || 4;
this.initWorkers();
}
initWorkers() {
// Create a pool of Web Workers
for (let i = 0; i < this.maxWorkers; i++) {
const worker = new Worker('image-worker.js');
this.workerPool.push({
worker: worker,
busy: false
});
}
}
getAvailableWorker() {
return this.workerPool.find(w => !w.busy);
}
async processImageAsync(imageData, filterType, options = {}) {
return new Promise((resolve, reject) => {
const workerInfo = this.getAvailableWorker();
if (!workerInfo) {
// If no worker is available, fall back to processing on the main thread
resolve(this.processImageSync(imageData, filterType, options));
return;
}
workerInfo.busy = true;
workerInfo.worker.onmessage = (e) => {
workerInfo.busy = false;
resolve(e.data.imageData);
};
workerInfo.worker.onerror = (error) => {
workerInfo.busy = false;
reject(error);
};
workerInfo.worker.postMessage({
imageData: imageData,
filterType: filterType,
options: options
});
});
}
processImageSync(imageData, filterType, options = {}) {
switch (filterType) {
case 'grayscale':
return this.applyGrayscale(imageData);
case 'blur':
return this.applyBlur(imageData, options.radius || 1);
case 'sharpen':
return this.applySharpen(imageData);
default:
return imageData;
}
}
// Optimized grayscale conversion (integer weights and a bit shift)
applyGrayscale(imageData) {
const data = imageData.data;
const length = data.length;
// Integer luminance weights scaled to sum to 1024, so >> 10 replaces a floating-point division
for (let i = 0; i < length; i += 4) {
const gray = (data[i] * 306 + data[i + 1] * 601 + data[i + 2] * 117) >> 10;
data[i] = gray;
data[i + 1] = gray;
data[i + 2] = gray;
}
return imageData;
}
// Optimized blur (separable convolution: one horizontal pass, then one vertical pass)
applyBlur(imageData, radius) {
// Horizontal pass
const horizontalBlurred = this.horizontalBlur(imageData, radius);
// Vertical pass
return this.verticalBlur(horizontalBlurred, radius);
}
horizontalBlur(imageData, radius) {
const data = imageData.data;
const width = imageData.width;
const height = imageData.height;
const output = new Uint8ClampedArray(data.length);
for (let y = 0; y < height; y++) {
for (let x = 0; x < width; x++) {
let r = 0, g = 0, b = 0, a = 0;
let count = 0;
for (let dx = -radius; dx <= radius; dx++) {
const px = x + dx;
if (px >= 0 && px < width) {
const idx = (y * width + px) * 4;
r += data[idx];
g += data[idx + 1];
b += data[idx + 2];
a += data[idx + 3];
count++;
}
}
const idx = (y * width + x) * 4;
output[idx] = r / count;
output[idx + 1] = g / count;
output[idx + 2] = b / count;
output[idx + 3] = a / count;
}
}
return new ImageData(output, width, height);
}
verticalBlur(imageData, radius) {
const data = imageData.data;
const width = imageData.width;
const height = imageData.height;
const output = new Uint8ClampedArray(data.length);
for (let x = 0; x < width; x++) {
for (let y = 0; y < height; y++) {
let r = 0, g = 0, b = 0, a = 0;
let count = 0;
for (let dy = -radius; dy <= radius; dy++) {
const py = y + dy;
if (py >= 0 && py < height) {
const idx = (py * width + x) * 4;
r += data[idx];
g += data[idx + 1];
b += data[idx + 2];
a += data[idx + 3];
count++;
}
}
const idx = (y * width + x) * 4;
output[idx] = r / count;
output[idx + 1] = g / count;
output[idx + 2] = b / count;
output[idx + 3] = a / count;
}
}
return new ImageData(output, width, height);
}
applySharpen(imageData) {
const kernel = [
0, -1, 0,
-1, 5, -1,
0, -1, 0
];
return ConvolutionFilters.applyKernel(imageData, kernel);
}
destroy() {
this.workerPool.forEach(workerInfo => {
workerInfo.worker.terminate();
});
this.workerPool = [];
}
}
// Usage example
const optimizedProcessor = new OptimizedImageProcessor();
// Asynchronously process a large image (largeImageData stands for an ImageData obtained elsewhere, e.g. via getImageData)
optimizedProcessor.processImageAsync(largeImageData, 'blur', { radius: 3 })
.then(processedData => {
ctx.putImageData(processedData, 0, 0);
})
.catch(error => {
console.error('Image processing failed:', error);
});
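The worker pool above loads an external script named image-worker.js, which is not shown in this chapter. A minimal sketch of what such a worker might contain, assuming it only needs to mirror the filter types handled by processImageSync (the grayscale loop is repeated inline because a worker cannot see the page's classes):
// image-worker.js (sketch): receives { imageData, filterType, options } and posts the result back
self.onmessage = (e) => {
  const { imageData, filterType } = e.data;
  const data = imageData.data;
  if (filterType === 'grayscale') {
    for (let i = 0; i < data.length; i += 4) {
      const gray = (data[i] * 306 + data[i + 1] * 601 + data[i + 2] * 117) >> 10;
      data[i] = data[i + 1] = data[i + 2] = gray;
    }
  }
  // 'blur', 'sharpen', etc. would be handled here in the same way
  self.postMessage({ imageData });
};
In a real implementation the underlying ArrayBuffer could be sent as a transferable object to avoid copying large frames between threads.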
7. Summary
This chapter took an in-depth look at Canvas image processing and pixel manipulation:
- Image drawing basics: the drawImage method, image loading management, image tiling
- Pixel data manipulation: getImageData/putImageData, pixel-level processing
- Image filter effects: basic filters, convolution filters, custom filters
- Image compositing and blending: composite operations, blending algorithms
- Image transformation and distortion: basic transforms, distortion effects
- Real-time image processing: camera processing, performance optimization
In the next chapter we will study transforms and coordinate systems, including matrix transforms, coordinate-system operations, and complex shape transformations.
8. Exercises
- Build an image editor that supports multiple filter effects
- Implement a real-time beauty-camera application
- Make an image jigsaw-puzzle game
- Build a simple image editing tool that supports layer blending
- Implement an image mosaic (pixelation) effect generator