// Uploads a captured BGRA sample buffer into a GPUImage framebuffer texture and
// pushes it through the filter, mimicking GPUImageVideoCamera's internal flow.
// NOTE(review): assumes the AVCaptureVideoDataOutput is configured for
// kCVPixelFormatType_32BGRA — confirm against the capture session settings.
[GPUImageContext useImageProcessingContext];

CVImageBufferRef cameraFrame = CMSampleBufferGetImageBuffer(sampleBuffer);
if (cameraFrame == NULL)
{
    // Dropped or non-image sample buffer — nothing to upload.
    return;
}
// Unpadded pixel width; kept for reference — the texture below deliberately
// uses the padded row width (bytesPerRow / 4) instead, matching GPUImage.
int bufferWidth = (int) CVPixelBufferGetWidth(cameraFrame);
int bufferHeight = (int) CVPixelBufferGetHeight(cameraFrame);

// Read-only lock: we only read the base address for the glTexImage2D upload.
CVPixelBufferLockBaseAddress(cameraFrame, kCVPixelBufferLock_ReadOnly);
int bytesPerRow = (int) CVPixelBufferGetBytesPerRow(cameraFrame);
// Texture width is bytesPerRow / 4 (BGRA = 4 bytes per pixel), NOT bufferWidth:
// CoreVideo may pad each row, and using the padded width lets glTexImage2D
// consume the base address row-by-row without a repack.
GPUImageFramebuffer *outputFramebuffer = [[GPUImageContext sharedFramebufferCache] fetchFramebufferForSize:CGSizeMake(bytesPerRow / 4, bufferHeight) onlyTexture:YES];
[outputFramebuffer activateFramebuffer];

// Draw the raw pixel bytes straight into the framebuffer's texture.
glBindTexture(GL_TEXTURE_2D, [outputFramebuffer texture]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, bytesPerRow / 4, bufferHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, CVPixelBufferGetBaseAddress(cameraFrame));
CVPixelBufferUnlockBaseAddress(cameraFrame, kCVPixelBufferLock_ReadOnly);

CMTime time = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);

// Mimic GPUImage's calling order: size, framebuffer, then frame-ready.
// FIX: the input size must match the texture we actually created
// (bytesPerRow / 4 wide), not bufferWidth — a mismatch horizontally stretches
// the image whenever the pixel buffer rows are padded. GPUImageVideoCamera
// uses bytesPerRow / 4 here as well.
[_filter setInputSize:CGSizeMake(bytesPerRow / 4, bufferHeight) atIndex:0];
[_filter setInputFramebuffer:outputFramebuffer atIndex:0];
[_filter newFrameReadyAtTime:time atIndex:0];
不谢。记住：采集输出的像素格式要用 BGRA（即 kCVPixelFormatType_32BGRA）。
处理流程：CVPixelBufferRef → 滤镜 → CVPixelBufferRef。因为在做直播，最近突然有个想法：自己做做滤镜试试看。实验了很多方法，下面结合我从网上了解的内容和自己的想法，给大家分享一下。首先，说到滤镜，肯定首先想到 iOS 上著名的开源库 GPUImage……