Revision History
Version | Date |
---|---|
V1.0 | 2017.12.24 |
Preface
Developers who have worked with audio and video are no strangers to encoding and decoding. Over the next few posts we will take a detailed look at audio/video codec topics. If you are interested, see these articles:
1. 音视频编解码(一) —— H264基本概览(一)
2. 音视频编解码(二) —— iOS中的H264硬编解码的实现(一)
H264 Encode/Decode Example
Below is an H264 hardware encode/decode example. The view controller (JJH264VC) captures camera frames with AVCaptureSession, hands each CMSampleBuffer to JJH264Encoder (a VTCompressionSession wrapper), prepends an Annex-B start code to the SPS/PPS and every encoded NALU, pushes them straight into JJH264Decoder (a VTDecompressionSession wrapper), and renders the decoded CVPixelBuffers with JJEAGLLayer, an OpenGL ES 2 CAEAGLLayer subclass. Let's look at the code.
1. JJH264VC.h
#import <UIKit/UIKit.h>
#import <AudioToolbox/AudioToolbox.h>
#import <AVFoundation/AVFoundation.h>
#import "JJH264Decoder.h"
#import "JJH264Encoder.h"
@interface JJH264VC : UIViewController <AVCaptureVideoDataOutputSampleBufferDelegate, JJH264EncoderDelegate, JJH264DecoderDelegate>
@end
2. JJH264VC.m
#import "JJH264VC.h"
#import "JJEAGLLayer.h"
#import "JJConfigFile.h"
@interface JJH264VC ()
{
AVCaptureSession *captureSession;
AVCaptureConnection* connectionVideo;
AVCaptureDevice *cameraDeviceB;
AVCaptureDevice *cameraDeviceF;
BOOL cameraDeviceIsF;
JJH264Encoder *h264Encoder;
AVCaptureVideoPreviewLayer *recordLayer;
JJH264Decoder *h264Decoder;
JJEAGLLayer *playLayer;
}
@end
@implementation JJH264VC
#pragma mark - Override Base Function
- (void)viewDidLoad
{
[super viewDidLoad];
self.view.frame = [UIScreen mainScreen].bounds;
self.view.backgroundColor = [UIColor whiteColor];
cameraDeviceIsF = YES;
NSArray *videoDevices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
for (AVCaptureDevice *device in videoDevices) {
if (device.position == AVCaptureDevicePositionFront) {
cameraDeviceF = device;
}
else if(device.position == AVCaptureDevicePositionBack)
{
cameraDeviceB = device;
}
}
h264Encoder = [[JJH264Encoder alloc] init];
[h264Encoder initWithConfiguration];
[h264Encoder initEncode:h264outputWidth height:h264outputHeight];
h264Encoder.delegate = self;
h264Decoder = [[JJH264Decoder alloc] init];
h264Decoder.delegate = self;
UIButton *switchButton = [[UIButton alloc] initWithFrame:CGRectMake(30,80,100,40)];
[switchButton setTitle:@"Start Camera" forState:UIControlStateNormal];
[switchButton setTitle:@"Stop Camera" forState:UIControlStateSelected];
[switchButton setBackgroundColor:[UIColor lightGrayColor]];
[switchButton setTitleColor:[UIColor blueColor] forState:UIControlStateNormal];
[switchButton addTarget:self
action:@selector(switchButtonDidClick:)
forControlEvents:UIControlEventTouchUpInside];
switchButton.selected = NO;
[self.view addSubview:switchButton];
UIButton *frontBackBtn = [[UIButton alloc] initWithFrame:CGRectMake(220,80,120,40)];
[frontBackBtn setTitle:@"Switch to Back Camera" forState:UIControlStateNormal];
[frontBackBtn setTitle:@"Switch to Front Camera" forState:UIControlStateSelected];
[frontBackBtn setBackgroundColor:[UIColor lightGrayColor]];
[frontBackBtn setTitleColor:[UIColor blueColor] forState:UIControlStateNormal];
[frontBackBtn addTarget:self
action:@selector(frontBackButtonDidClick:)
forControlEvents:UIControlEventTouchUpInside];
frontBackBtn.selected = NO;
[self.view addSubview:frontBackBtn];
playLayer = [[JJEAGLLayer alloc] initWithFrame:CGRectMake(200, 150, 160, 300)];
playLayer.backgroundColor = [UIColor blackColor].CGColor;
}
#pragma mark - Object Private Function
- (void) initCamera:(BOOL)type
{
NSError *deviceError;
AVCaptureDeviceInput *inputCameraDevice;
if (type==false)
{
inputCameraDevice = [AVCaptureDeviceInput deviceInputWithDevice:cameraDeviceB error:&deviceError];
}
else
{
inputCameraDevice = [AVCaptureDeviceInput deviceInputWithDevice:cameraDeviceF error:&deviceError];
}
AVCaptureVideoDataOutput *outputVideoDevice = [[AVCaptureVideoDataOutput alloc] init];
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
NSNumber* val = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange];
NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:val forKey:key];
outputVideoDevice.videoSettings = videoSettings;
[outputVideoDevice setSampleBufferDelegate:self queue:dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0)];
captureSession = [[AVCaptureSession alloc] init];
[captureSession addInput:inputCameraDevice];
[captureSession addOutput:outputVideoDevice];
[captureSession beginConfiguration];
[captureSession setSessionPreset:AVCaptureSessionPreset1280x720];
connectionVideo = [outputVideoDevice connectionWithMediaType:AVMediaTypeVideo];
#if TARGET_OS_IPHONE
[self setRelativeVideoOrientation];
NSNotificationCenter* notify = [NSNotificationCenter defaultCenter];
[notify addObserver:self
selector:@selector(statusBarOrientationDidChange:)
name:@"StatusBarOrientationDidChange"
object:nil];
#endif
[captureSession commitConfiguration];
recordLayer = [AVCaptureVideoPreviewLayer layerWithSession:captureSession];
[recordLayer setVideoGravity:AVLayerVideoGravityResizeAspect];
}
- (void)startCamera
{
recordLayer = [AVCaptureVideoPreviewLayer layerWithSession:captureSession];
[recordLayer setVideoGravity:AVLayerVideoGravityResizeAspect];
recordLayer.frame = CGRectMake(0, 120, 160, 300);
[self.view.layer addSublayer:recordLayer];
[captureSession startRunning];
[self.view.layer addSublayer:playLayer];
}
- (void)stopCamera
{
[captureSession stopRunning];
[recordLayer removeFromSuperlayer];
[playLayer removeFromSuperlayer];
}
#pragma mark - Action && Notification
- (void)switchButtonDidClick:(UIButton *)btn
{
btn.selected = !btn.selected;
if (btn.selected==YES)
{
[self stopCamera];
[self initCamera:cameraDeviceIsF];
[self startCamera];
}
else
{
[self stopCamera];
}
}
- (void)frontBackButtonDidClick:(UIButton *)btn
{
btn.selected = !btn.selected;
if (captureSession.isRunning==YES)
{
cameraDeviceIsF = !cameraDeviceIsF;
NSLog(@"变位置");
[self stopCamera];
[self initCamera:cameraDeviceIsF];
[self startCamera];
}
}
#pragma mark - AVCaptureVideoDataOutputSampleBufferDelegate
- (void)captureOutput:(AVCaptureOutput*)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection*)connection
{
if (connection==connectionVideo)
{
[h264Encoder encode:sampleBuffer];
}
}
#pragma mark - JJH264EncoderDelegate (encoder callbacks)
- (void)gotSpsPps:(NSData*)sps pps:(NSData*)pps
{
const char bytes[] = "\x00\x00\x00\x01"; // Annex-B start code
size_t length = (sizeof bytes) - 1;
NSData *ByteHeader = [NSData dataWithBytes:bytes length:length];
// send the SPS
NSMutableData *h264Data = [[NSMutableData alloc] init];
[h264Data appendData:ByteHeader];
[h264Data appendData:sps];
[h264Decoder decodeNalu:(uint8_t *)[h264Data bytes] withSize:(uint32_t)h264Data.length];
// send the PPS
[h264Data setLength:0];
[h264Data appendData:ByteHeader];
[h264Data appendData:pps];
[h264Decoder decodeNalu:(uint8_t *)[h264Data bytes] withSize:(uint32_t)h264Data.length];
}
- (void)gotEncodedData:(NSData*)data isKeyFrame:(BOOL)isKeyFrame
{
const char bytes[] = "\x00\x00\x00\x01"; // Annex-B start code
size_t length = (sizeof bytes) - 1;
NSData *ByteHeader = [NSData dataWithBytes:bytes length:length];
NSMutableData *h264Data = [[NSMutableData alloc] init];
[h264Data appendData:ByteHeader];
[h264Data appendData:data];
[h264Decoder decodeNalu:(uint8_t *)[h264Data bytes] withSize:(uint32_t)h264Data.length];
}
#pragma mark - JJH264DecoderDelegate (decoder callback)
- (void)displayDecodedFrame:(CVImageBufferRef )imageBuffer
{
if(imageBuffer)
{
playLayer.pixelBuffer = imageBuffer;
// Release the reference that was retained in the decoder's output callback.
CVPixelBufferRelease(imageBuffer);
}
}
#pragma mark - Orientation handling
#if TARGET_OS_IPHONE
- (void)statusBarOrientationDidChange:(NSNotification*)notification
{
[self setRelativeVideoOrientation];
}
- (void)setRelativeVideoOrientation
{
switch ([UIApplication sharedApplication].statusBarOrientation) {
case UIInterfaceOrientationPortrait:
#if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0
case UIInterfaceOrientationUnknown:
#endif
recordLayer.connection.videoOrientation = AVCaptureVideoOrientationPortrait;
connectionVideo.videoOrientation = AVCaptureVideoOrientationPortrait;
break;
case UIInterfaceOrientationPortraitUpsideDown:
recordLayer.connection.videoOrientation = AVCaptureVideoOrientationPortraitUpsideDown;
connectionVideo.videoOrientation = AVCaptureVideoOrientationPortraitUpsideDown;
break;
case UIInterfaceOrientationLandscapeLeft:
recordLayer.connection.videoOrientation = AVCaptureVideoOrientationLandscapeLeft;
connectionVideo.videoOrientation = AVCaptureVideoOrientationLandscapeLeft;
break;
case UIInterfaceOrientationLandscapeRight:
recordLayer.connection.videoOrientation = AVCaptureVideoOrientationLandscapeRight;
connectionVideo.videoOrientation = AVCaptureVideoOrientationLandscapeRight;
break;
default:
break;
}
}
#endif
@end
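Note that both the view controller and the decoder reference h264outputWidth and h264outputHeight from "JJConfigFile.h", which the post does not list. A minimal sketch of what that header might contain follows; the 480×640 values are an assumption for a portrait capture, not taken from the original project, so adjust them to match your own preset.
// JJConfigFile.h (hypothetical sketch, not part of the original post)
#ifndef JJConfigFile_h
#define JJConfigFile_h
// Assumed encoder output size in portrait; pick values that match your capture preset.
#define h264outputWidth 480
#define h264outputHeight 640
#endif /* JJConfigFile_h */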
3. JJEAGLLayer.h
#import <QuartzCore/QuartzCore.h>
#include <CoreVideo/CoreVideo.h>
@interface JJEAGLLayer : CAEAGLLayer
@property CVPixelBufferRef pixelBuffer;
- (id)initWithFrame:(CGRect)frame;
- (void)resetRenderBuffer;
@end
4. JJEAGLLayer.m
#import "JJEAGLLayer.h"
#import <AVFoundation/AVUtilities.h>
#import <mach/mach_time.h>
#include <AVFoundation/AVFoundation.h>
#import <UIKit/UIScreen.h>
#include <OpenGLES/EAGL.h>
#include <OpenGLES/ES2/gl.h>
#include <OpenGLES/ES2/glext.h>
// Uniform index.
enum
{
UNIFORM_Y,
UNIFORM_UV,
UNIFORM_ROTATION_ANGLE,
UNIFORM_COLOR_CONVERSION_MATRIX,
NUM_UNIFORMS
};
GLint uniforms[NUM_UNIFORMS];
// Attribute index.
enum
{
ATTRIB_VERTEX,
ATTRIB_TEXCOORD,
NUM_ATTRIBUTES
};
// Color Conversion Constants (YUV to RGB) including adjustment from 16-235/16-240 (video range)
// BT.601, which is the standard for SDTV.
static const GLfloat kColorConversion601[] = {
1.164, 1.164, 1.164,
0.0, -0.392, 2.017,
1.596, -0.813, 0.0,
};
// BT.709, which is the standard for HDTV.
static const GLfloat kColorConversion709[] = {
1.164, 1.164, 1.164,
0.0, -0.213, 2.112,
1.793, -0.533, 0.0,
};
@interface JJEAGLLayer()
{
// The pixel dimensions of the CAEAGLLayer.
GLint _backingWidth;
GLint _backingHeight;
EAGLContext *_context;
CVOpenGLESTextureRef _lumaTexture;
CVOpenGLESTextureRef _chromaTexture;
GLuint _frameBufferHandle;
GLuint _colorBufferHandle;
const GLfloat *_preferredConversion;
}
@property GLuint program;
@end
@implementation JJEAGLLayer
@synthesize pixelBuffer = _pixelBuffer;
#pragma mark - Override Base Function
- (void)dealloc
{
if (!_context || ![EAGLContext setCurrentContext:_context]) {
return;
}
[self cleanUpTextures];
if(_pixelBuffer) {
CVPixelBufferRelease(_pixelBuffer);
}
if (self.program) {
glDeleteProgram(self.program);
self.program = 0;
}
if(_context) {
//[_context release];
_context = nil;
}
//[super dealloc];
}
#pragma mark - Object Private Function
- (void)displayPixelBuffer:(CVPixelBufferRef)pixelBuffer width:(uint32_t)frameWidth height:(uint32_t)frameHeight
{
if (!_context || ![EAGLContext setCurrentContext:_context]) {
return;
}
if(pixelBuffer == NULL) {
NSLog(@"Pixel buffer is null");
return;
}
CVReturn err;
size_t planeCount = CVPixelBufferGetPlaneCount(pixelBuffer);
/*
Use the color attachment of the pixel buffer to determine the appropriate color conversion matrix.
*/
CFTypeRef colorAttachments = CVBufferGetAttachment(pixelBuffer, kCVImageBufferYCbCrMatrixKey, NULL);
if (CFStringCompare(colorAttachments, kCVImageBufferYCbCrMatrix_ITU_R_601_4, 0) == kCFCompareEqualTo) {
_preferredConversion = kColorConversion601;
}
else {
_preferredConversion = kColorConversion709;
}
/*
CVOpenGLESTextureCacheCreateTextureFromImage will create GLES texture optimally from CVPixelBufferRef.
*/
/*
Create Y and UV textures from the pixel buffer. These textures will be drawn on the frame buffer Y-plane.
*/
CVOpenGLESTextureCacheRef _videoTextureCache;
// Create CVOpenGLESTextureCacheRef for optimal CVPixelBufferRef to GLES texture conversion.
// Note: this sample creates and releases the cache on every frame; creating it once and reusing it would be cheaper.
err = CVOpenGLESTextureCacheCreate(kCFAllocatorDefault, NULL, _context, NULL, &_videoTextureCache);
if (err != noErr) {
NSLog(@"Error at CVOpenGLESTextureCacheCreate %d", err);
return;
}
glActiveTexture(GL_TEXTURE0);
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
_videoTextureCache,
pixelBuffer,
NULL,
GL_TEXTURE_2D,
GL_RED_EXT,
frameWidth,
frameHeight,
GL_RED_EXT,
GL_UNSIGNED_BYTE,
0,
&_lumaTexture);
if (err) {
NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
}
glBindTexture(CVOpenGLESTextureGetTarget(_lumaTexture), CVOpenGLESTextureGetName(_lumaTexture));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
if(planeCount == 2) {
// UV-plane.
glActiveTexture(GL_TEXTURE1);
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
_videoTextureCache,
pixelBuffer,
NULL,
GL_TEXTURE_2D,
GL_RG_EXT,
frameWidth / 2,
frameHeight / 2,
GL_RG_EXT,
GL_UNSIGNED_BYTE,
1,
&_chromaTexture);
if (err) {
NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
}
glBindTexture(CVOpenGLESTextureGetTarget(_chromaTexture), CVOpenGLESTextureGetName(_chromaTexture));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
glBindFramebuffer(GL_FRAMEBUFFER, _frameBufferHandle);
// Set the view port to the entire view.
glViewport(0, 0, _backingWidth, _backingHeight);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Use shader program.
glUseProgram(self.program);
// glUniform1f(uniforms[UNIFORM_LUMA_THRESHOLD], 1);
// glUniform1f(uniforms[UNIFORM_CHROMA_THRESHOLD], 1);
glUniform1f(uniforms[UNIFORM_ROTATION_ANGLE], 0);
glUniformMatrix3fv(uniforms[UNIFORM_COLOR_CONVERSION_MATRIX], 1, GL_FALSE, _preferredConversion);
// Set up the quad vertices with respect to the orientation and aspect ratio of the video.
CGRect viewBounds = self.bounds;
CGSize contentSize = CGSizeMake(frameWidth, frameHeight);
CGRect vertexSamplingRect = AVMakeRectWithAspectRatioInsideRect(contentSize, viewBounds);
// Compute normalized quad coordinates to draw the frame into.
CGSize normalizedSamplingSize = CGSizeMake(0.0, 0.0);
CGSize cropScaleAmount = CGSizeMake(vertexSamplingRect.size.width/viewBounds.size.width,
vertexSamplingRect.size.height/viewBounds.size.height);
// Normalize the quad vertices.
if (cropScaleAmount.width > cropScaleAmount.height) {
normalizedSamplingSize.width = 1.0;
normalizedSamplingSize.height = cropScaleAmount.height/cropScaleAmount.width;
}
else {
normalizedSamplingSize.width = cropScaleAmount.width/cropScaleAmount.height;
normalizedSamplingSize.height = 1.0;
}
/*
The quad vertex data defines the region of 2D plane onto which we draw our pixel buffers.
Vertex data formed using (-1,-1) and (1,1) as the bottom left and top right coordinates respectively, covers the entire screen.
*/
GLfloat quadVertexData [] = {
-1 * normalizedSamplingSize.width, -1 * normalizedSamplingSize.height,
normalizedSamplingSize.width, -1 * normalizedSamplingSize.height,
-1 * normalizedSamplingSize.width, normalizedSamplingSize.height,
normalizedSamplingSize.width, normalizedSamplingSize.height,
};
// Update attribute values.
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, quadVertexData);
glEnableVertexAttribArray(ATTRIB_VERTEX);
/*
The texture vertices are set up such that we flip the texture vertically. This is so that our top left origin buffers match OpenGL's bottom left texture coordinate system.
*/
CGRect textureSamplingRect = CGRectMake(0, 0, 1, 1);
GLfloat quadTextureData[] = {
CGRectGetMinX(textureSamplingRect), CGRectGetMaxY(textureSamplingRect),
CGRectGetMaxX(textureSamplingRect), CGRectGetMaxY(textureSamplingRect),
CGRectGetMinX(textureSamplingRect), CGRectGetMinY(textureSamplingRect),
CGRectGetMaxX(textureSamplingRect), CGRectGetMinY(textureSamplingRect)
};
glVertexAttribPointer(ATTRIB_TEXCOORD, 2, GL_FLOAT, 0, 0, quadTextureData);
glEnableVertexAttribArray(ATTRIB_TEXCOORD);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindRenderbuffer(GL_RENDERBUFFER, _colorBufferHandle);
[_context presentRenderbuffer:GL_RENDERBUFFER];
[self cleanUpTextures];
// Periodic texture cache flush every frame
CVOpenGLESTextureCacheFlush(_videoTextureCache, 0);
if(_videoTextureCache) {
CFRelease(_videoTextureCache);
}
}
#pragma mark - Object Public Function
- (instancetype)initWithFrame:(CGRect)frame
{
self = [super init];
if (self) {
CGFloat scale = [[UIScreen mainScreen] scale];
self.contentsScale = scale;
self.opaque = TRUE;
self.drawableProperties = @{ kEAGLDrawablePropertyRetainedBacking :[NSNumber numberWithBool:YES]};
[self setFrame:frame];
// Set the context into which the frames will be drawn.
_context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
if (!_context) {
return nil;
}
// Set the default conversion to BT.709, which is the standard for HDTV.
_preferredConversion = kColorConversion709;
[self setupGL];
}
return self;
}
#pragma mark - Getter && Setter
- (CVPixelBufferRef)pixelBuffer
{
return _pixelBuffer;
}
- (void)setPixelBuffer:(CVPixelBufferRef)pb
{
if(_pixelBuffer) {
CVPixelBufferRelease(_pixelBuffer);
}
_pixelBuffer = CVPixelBufferRetain(pb);
int frameWidth = (int)CVPixelBufferGetWidth(_pixelBuffer);
int frameHeight = (int)CVPixelBufferGetHeight(_pixelBuffer);
[self displayPixelBuffer:_pixelBuffer width:frameWidth height:frameHeight];
}
// OpenGL setup
- (void)setupGL
{
if (!_context || ![EAGLContext setCurrentContext:_context]) {
return;
}
[self setupBuffers];
[self loadShaders];
glUseProgram(self.program);
// 0 and 1 are the texture IDs of _lumaTexture and _chromaTexture respectively.
glUniform1i(uniforms[UNIFORM_Y], 0);
glUniform1i(uniforms[UNIFORM_UV], 1);
glUniform1f(uniforms[UNIFORM_ROTATION_ANGLE], 0);
glUniformMatrix3fv(uniforms[UNIFORM_COLOR_CONVERSION_MATRIX], 1, GL_FALSE, _preferredConversion);
}
// Utilities
- (void)setupBuffers
{
glDisable(GL_DEPTH_TEST);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat), 0);
glEnableVertexAttribArray(ATTRIB_TEXCOORD);
glVertexAttribPointer(ATTRIB_TEXCOORD, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat), 0);
[self createBuffers];
}
- (void)createBuffers
{
glGenFramebuffers(1, &_frameBufferHandle);
glBindFramebuffer(GL_FRAMEBUFFER, _frameBufferHandle);
glGenRenderbuffers(1, &_colorBufferHandle);
glBindRenderbuffer(GL_RENDERBUFFER, _colorBufferHandle);
[_context renderbufferStorage:GL_RENDERBUFFER fromDrawable:self];
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &_backingWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &_backingHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, _colorBufferHandle);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
NSLog(@"Failed to make complete framebuffer object %x", glCheckFramebufferStatus(GL_FRAMEBUFFER));
}
}
- (void)releaseBuffers
{
if(_frameBufferHandle) {
glDeleteFramebuffers(1, &_frameBufferHandle);
_frameBufferHandle = 0;
}
if(_colorBufferHandle) {
glDeleteRenderbuffers(1, &_colorBufferHandle);
_colorBufferHandle = 0;
}
}
- (void)resetRenderBuffer
{
if (!_context || ![EAGLContext setCurrentContext:_context]) {
return;
}
[self releaseBuffers];
[self createBuffers];
}
- (void)cleanUpTextures
{
if (_lumaTexture) {
CFRelease(_lumaTexture);
_lumaTexture = NULL;
}
if (_chromaTexture) {
CFRelease(_chromaTexture);
_chromaTexture = NULL;
}
}
#pragma mark - OpenGL ES 2 shader compilation
const GLchar *shader_fsh = (const GLchar*)"varying highp vec2 texCoordVarying;"
"precision mediump float;"
"uniform sampler2D SamplerY;"
"uniform sampler2D SamplerUV;"
"uniform mat3 colorConversionMatrix;"
"void main()"
"{"
" mediump vec3 yuv;"
" lowp vec3 rgb;"
// Subtract constants to map the video range start at 0
" yuv.x = (texture2D(SamplerY, texCoordVarying).r - (16.0/255.0));"
" yuv.yz = (texture2D(SamplerUV, texCoordVarying).rg - vec2(0.5, 0.5));"
" rgb = colorConversionMatrix * yuv;"
" gl_FragColor = vec4(rgb, 1);"
"}";
const GLchar *shader_vsh = (const GLchar*)"attribute vec4 position;"
"attribute vec2 texCoord;"
"uniform float preferredRotation;"
"varying vec2 texCoordVarying;"
"void main()"
"{"
" mat4 rotationMatrix = mat4(cos(preferredRotation), -sin(preferredRotation), 0.0, 0.0,"
" sin(preferredRotation), cos(preferredRotation), 0.0, 0.0,"
" 0.0, 0.0, 1.0, 0.0,"
" 0.0, 0.0, 0.0, 1.0);"
" gl_Position = position * rotationMatrix;"
" texCoordVarying = texCoord;"
"}";
- (BOOL)loadShaders
{
GLuint vertShader = 0, fragShader = 0;
// Create the shader program.
self.program = glCreateProgram();
if(![self compileShaderString:&vertShader type:GL_VERTEX_SHADER shaderString:shader_vsh]) {
NSLog(@"Failed to compile vertex shader");
return NO;
}
if(![self compileShaderString:&fragShader type:GL_FRAGMENT_SHADER shaderString:shader_fsh]) {
NSLog(@"Failed to compile fragment shader");
return NO;
}
// Attach vertex shader to program.
glAttachShader(self.program, vertShader);
// Attach fragment shader to program.
glAttachShader(self.program, fragShader);
// Bind attribute locations. This needs to be done prior to linking.
glBindAttribLocation(self.program, ATTRIB_VERTEX, "position");
glBindAttribLocation(self.program, ATTRIB_TEXCOORD, "texCoord");
// Link the program.
if (![self linkProgram:self.program]) {
NSLog(@"Failed to link program: %d", self.program);
if (vertShader) {
glDeleteShader(vertShader);
vertShader = 0;
}
if (fragShader) {
glDeleteShader(fragShader);
fragShader = 0;
}
if (self.program) {
glDeleteProgram(self.program);
self.program = 0;
}
return NO;
}
// Get uniform locations.
uniforms[UNIFORM_Y] = glGetUniformLocation(self.program, "SamplerY");
uniforms[UNIFORM_UV] = glGetUniformLocation(self.program, "SamplerUV");
// uniforms[UNIFORM_LUMA_THRESHOLD] = glGetUniformLocation(self.program, "lumaThreshold");
// uniforms[UNIFORM_CHROMA_THRESHOLD] = glGetUniformLocation(self.program, "chromaThreshold");
uniforms[UNIFORM_ROTATION_ANGLE] = glGetUniformLocation(self.program, "preferredRotation");
uniforms[UNIFORM_COLOR_CONVERSION_MATRIX] = glGetUniformLocation(self.program, "colorConversionMatrix");
// Release vertex and fragment shaders.
if (vertShader) {
glDetachShader(self.program, vertShader);
glDeleteShader(vertShader);
}
if (fragShader) {
glDetachShader(self.program, fragShader);
glDeleteShader(fragShader);
}
return YES;
}
- (BOOL)compileShaderString:(GLuint *)shader type:(GLenum)type shaderString:(const GLchar*)shaderString
{
*shader = glCreateShader(type);
glShaderSource(*shader, 1, &shaderString, NULL);
glCompileShader(*shader);
#if defined(DEBUG)
GLint logLength;
glGetShaderiv(*shader, GL_INFO_LOG_LENGTH, &logLength);
if (logLength > 0) {
GLchar *log = (GLchar *)malloc(logLength);
glGetShaderInfoLog(*shader, logLength, &logLength, log);
NSLog(@"Shader compile log:\n%s", log);
free(log);
}
#endif
GLint status = 0;
glGetShaderiv(*shader, GL_COMPILE_STATUS, &status);
if (status == 0) {
glDeleteShader(*shader);
return NO;
}
return YES;
}
- (BOOL)compileShader:(GLuint *)shader type:(GLenum)type URL:(NSURL *)URL
{
NSError *error;
NSString *sourceString = [[NSString alloc] initWithContentsOfURL:URL encoding:NSUTF8StringEncoding error:&error];
if (sourceString == nil) {
NSLog(@"Failed to load vertex shader: %@", [error localizedDescription]);
return NO;
}
const GLchar *source = (GLchar *)[sourceString UTF8String];
return [self compileShaderString:shader type:type shaderString:source];
}
- (BOOL)linkProgram:(GLuint)prog
{
GLint status;
glLinkProgram(prog);
#if defined(DEBUG)
GLint logLength;
glGetProgramiv(prog, GL_INFO_LOG_LENGTH, &logLength);
if (logLength > 0) {
GLchar *log = (GLchar *)malloc(logLength);
glGetProgramInfoLog(prog, logLength, &logLength, log);
NSLog(@"Program link log:\n%s", log);
free(log);
}
#endif
glGetProgramiv(prog, GL_LINK_STATUS, &status);
if (status == 0) {
return NO;
}
return YES;
}
- (BOOL)validateProgram:(GLuint)prog
{
GLint logLength, status;
glValidateProgram(prog);
glGetProgramiv(prog, GL_INFO_LOG_LENGTH, &logLength);
if (logLength > 0) {
GLchar *log = (GLchar *)malloc(logLength);
glGetProgramInfoLog(prog, logLength, &logLength, log);
NSLog(@"Program validate log:\n%s", log);
free(log);
}
glGetProgramiv(prog, GL_VALIDATE_STATUS, &status);
if (status == 0) {
return NO;
}
return YES;
}
@end
5. JJH264Decoder.h
#import <Foundation/Foundation.h>
#import <VideoToolbox/VideoToolbox.h>
#import <AVFoundation/AVSampleBufferDisplayLayer.h>
@protocol JJH264DecoderDelegate <NSObject>
- (void)displayDecodedFrame:(CVImageBufferRef )imageBuffer;
@end
@interface JJH264Decoder : NSObject
@property (weak, nonatomic) id<JJH264DecoderDelegate>delegate;
- (BOOL)initH264Decoder;
- (void)decodeNalu:(uint8_t *)frame withSize:(uint32_t)frameSize;
@end
6. JJH264Decoder.m
#import "JJH264Decoder.h"
#import "JJConfigFile.h"
@interface JJH264Decoder()
{
uint8_t *_sps;
NSInteger _spsSize;
uint8_t *_pps;
NSInteger _ppsSize;
VTDecompressionSessionRef _deocderSession;
CMVideoFormatDescriptionRef _decoderFormatDescription;
}
@end
@implementation JJH264Decoder
// Decompression (decode) output callback
static void didDecompress( void *decompressionOutputRefCon, void *sourceFrameRefCon, OSStatus status, VTDecodeInfoFlags infoFlags, CVImageBufferRef pixelBuffer, CMTime presentationTimeStamp, CMTime presentationDuration ){
CVPixelBufferRef *outputPixelBuffer = (CVPixelBufferRef *)sourceFrameRefCon;
*outputPixelBuffer = CVPixelBufferRetain(pixelBuffer);
JJH264Decoder *decoder = (__bridge JJH264Decoder *)decompressionOutputRefCon;
if (decoder.delegate!=nil)
{
[decoder.delegate displayDecodedFrame:pixelBuffer];
}
}
#pragma mark - Object Public Function
- (BOOL)initH264Decoder
{
if(_deocderSession) {
return YES;
}
const uint8_t* const parameterSetPointers[2] = { _sps, _pps };
const size_t parameterSetSizes[2] = { _spsSize, _ppsSize };
OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault,
2, //param count
parameterSetPointers,
parameterSetSizes,
4, //nal start code size
&_decoderFormatDescription);
if(status == noErr) {
NSDictionary* destinationPixelBufferAttributes = @{
(id)kCVPixelBufferPixelFormatTypeKey : [NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange],
// Hardware decoding must use kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange
// (or kCVPixelFormatType_420YpCbCr8Planar),
// because iOS delivers NV12 while other platforms typically use NV21.
(id)kCVPixelBufferWidthKey : [NSNumber numberWithInt:h264outputHeight*2],
(id)kCVPixelBufferHeightKey : [NSNumber numberWithInt:h264outputWidth*2],
// Width and height here are swapped relative to the encoder.
(id)kCVPixelBufferOpenGLCompatibilityKey : [NSNumber numberWithBool:YES]
};
VTDecompressionOutputCallbackRecord callBackRecord;
callBackRecord.decompressionOutputCallback = didDecompress;
callBackRecord.decompressionOutputRefCon = (__bridge void *)self;
status = VTDecompressionSessionCreate(kCFAllocatorDefault,
_decoderFormatDescription,
NULL,
(__bridge CFDictionaryRef)destinationPixelBufferAttributes,
&callBackRecord,
&_deocderSession);
VTSessionSetProperty(_deocderSession, kVTDecompressionPropertyKey_ThreadCount, (__bridge CFTypeRef)[NSNumber numberWithInt:1]);
VTSessionSetProperty(_deocderSession, kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
} else {
NSLog(@"IOS8VT: reset decoder session failed status=%d", (int)status);
}
return YES;
}
- (void)decodeNalu:(uint8_t *)frame withSize:(uint32_t)frameSize
{
// NSLog(@">>>>>>>>>>开始解码");
int nalu_type = (frame[4] & 0x1F);
CVPixelBufferRef pixelBuffer = NULL;
// Replace the 4-byte Annex-B start code with a big-endian NAL unit length (the AVCC layout VideoToolbox expects).
uint32_t nalSize = (uint32_t)(frameSize - 4);
uint8_t *pNalSize = (uint8_t*)(&nalSize);
frame[0] = *(pNalSize + 3);
frame[1] = *(pNalSize + 2);
frame[2] = *(pNalSize + 1);
frame[3] = *(pNalSize);
// When streaming, keyframe data must not be dropped or the picture turns green; B/P frames may be dropped, which only causes stuttering.
switch (nalu_type)
{
case 0x05:
// NSLog(@"nalu_type:%d Nal type is IDR frame",nalu_type); //关键帧
if([self initH264Decoder])
{
pixelBuffer = [self decode:frame withSize:frameSize];
}
break;
case 0x07:
// NSLog(@"nalu_type:%d Nal type is SPS",nalu_type); //sps
_spsSize = frameSize - 4;
_sps = malloc(_spsSize);
memcpy(_sps, &frame[4], _spsSize);
break;
case 0x08:
{
// NSLog(@"nalu_type:%d Nal type is PPS",nalu_type); //pps
_ppsSize = frameSize - 4;
_pps = malloc(_ppsSize);
memcpy(_pps, &frame[4], _ppsSize);
break;
}
default:
{
// NSLog(@"Nal type is B/P frame");//其他帧
if([self initH264Decoder])
{
pixelBuffer = [self decode:frame withSize:frameSize];
}
break;
}
}
}
#pragma mark - Object Private Function
- (CVPixelBufferRef)decode:(uint8_t *)frame withSize:(uint32_t)frameSize
{
CVPixelBufferRef outputPixelBuffer = NULL;
CMBlockBufferRef blockBuffer = NULL;
OSStatus status = CMBlockBufferCreateWithMemoryBlock(NULL,
(void *)frame,
frameSize,
kCFAllocatorNull,
NULL,
0,
frameSize,
FALSE,
&blockBuffer);
if(status == kCMBlockBufferNoErr) {
CMSampleBufferRef sampleBuffer = NULL;
const size_t sampleSizeArray[] = {frameSize};
status = CMSampleBufferCreateReady(kCFAllocatorDefault,
blockBuffer,
_decoderFormatDescription ,
1, 0, NULL, 1, sampleSizeArray,
&sampleBuffer);
if (status == kCMBlockBufferNoErr && sampleBuffer) {
VTDecodeFrameFlags flags = 0;
VTDecodeInfoFlags flagOut = 0;
OSStatus decodeStatus = VTDecompressionSessionDecodeFrame(_deocderSession,
sampleBuffer,
flags,
&outputPixelBuffer,
&flagOut);
if(decodeStatus == kVTInvalidSessionErr) {
NSLog(@"IOS8VT: Invalid session, reset decoder session");
} else if(decodeStatus == kVTVideoDecoderBadDataErr) {
NSLog(@"IOS8VT: decode failed status=%d(Bad data)", (int)decodeStatus);
} else if(decodeStatus != noErr) {
NSLog(@"IOS8VT: decode failed status=%d", (int)decodeStatus);
}
CFRelease(sampleBuffer);
}
CFRelease(blockBuffer);
}
return outputPixelBuffer;
}
@end
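One thing the decoder above never does is release the decompression session, the format description, or the malloc'd SPS/PPS buffers. A cleanup method along the following lines could be added to JJH264Decoder and called when you are done decoding; this is a sketch, not part of the original code.
// Hypothetical teardown helper for JJH264Decoder (not in the original post).
- (void)releaseH264Decoder
{
    if (_deocderSession) {
        VTDecompressionSessionInvalidate(_deocderSession);
        CFRelease(_deocderSession);
        _deocderSession = NULL;
    }
    if (_decoderFormatDescription) {
        CFRelease(_decoderFormatDescription);
        _decoderFormatDescription = NULL;
    }
    if (_sps) { free(_sps); _sps = NULL; }
    if (_pps) { free(_pps); _pps = NULL; }
}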
7. JJH264Encoder.h
#import <Foundation/Foundation.h>
@import AVFoundation;
@protocol JJH264EncoderDelegate <NSObject>
- (void)gotSpsPps:(NSData*)sps pps:(NSData*)pps;
- (void)gotEncodedData:(NSData*)data isKeyFrame:(BOOL)isKeyFrame;
@end
@interface JJH264Encoder : NSObject
- (void)initWithConfiguration;
- (void)initEncode:(int)width height:(int)height;
- (void)encode:(CMSampleBufferRef )sampleBuffer;
@property (weak, nonatomic) id<JJH264EncoderDelegate> delegate;
@end
8. JJH264Encoder.m
#import "JJH264Encoder.h"
@import VideoToolbox;
@import AVFoundation;
@implementation JJH264Encoder
{
NSString * yuvFile;
VTCompressionSessionRef EncodingSession;
dispatch_queue_t aQueue;
CMFormatDescriptionRef format;
CMSampleTimingInfo * timingInfo;
int frameCount;
NSData *sps;
NSData *pps;
}
void didCompressH264(void *outputCallbackRefCon, void *sourceFrameRefCon, OSStatus status, VTEncodeInfoFlags infoFlags,
CMSampleBufferRef sampleBuffer )
{
if (status != 0) return;
if (!CMSampleBufferDataIsReady(sampleBuffer))
{
NSLog(@"didCompressH264 data is not ready ");
return;
}
JJH264Encoder* encoder = (__bridge JJH264Encoder*)outputCallbackRefCon;
bool keyframe = !CFDictionaryContainsKey((CFDictionaryRef)CFArrayGetValueAtIndex(CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true), 0), kCMSampleAttachmentKey_NotSync);
if (keyframe)
{
CMFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBuffer);
size_t sparameterSetSize, sparameterSetCount;
const uint8_t *sparameterSet;
OSStatus statusCode = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 0, &sparameterSet, &sparameterSetSize, &sparameterSetCount, 0 );
if (statusCode == noErr)
{
size_t pparameterSetSize, pparameterSetCount;
const uint8_t *pparameterSet;
OSStatus statusCode = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 1, &pparameterSet, &pparameterSetSize, &pparameterSetCount, 0 );
if (statusCode == noErr)
{
encoder->sps = [NSData dataWithBytes:sparameterSet length:sparameterSetSize];
encoder->pps = [NSData dataWithBytes:pparameterSet length:pparameterSetSize];
if (encoder->_delegate)
{
[encoder->_delegate gotSpsPps:encoder->sps pps:encoder->pps];
}
}
}
}
CMBlockBufferRef dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
size_t length, totalLength;
char *dataPointer;
OSStatus statusCodeRet = CMBlockBufferGetDataPointer(dataBuffer, 0, &length, &totalLength, &dataPointer);
if (statusCodeRet == noErr) {
size_t bufferOffset = 0;
static const int AVCCHeaderLength = 4; // each NALU in the buffer is prefixed with a 4-byte big-endian length
while (bufferOffset < totalLength - AVCCHeaderLength)
{
uint32_t NALUnitLength = 0;
memcpy(&NALUnitLength, dataPointer + bufferOffset, AVCCHeaderLength);
NALUnitLength = CFSwapInt32BigToHost(NALUnitLength);
NSData* data = [[NSData alloc] initWithBytes:(dataPointer + bufferOffset + AVCCHeaderLength) length:NALUnitLength];
[encoder->_delegate gotEncodedData:data isKeyFrame:keyframe];
bufferOffset += AVCCHeaderLength + NALUnitLength;
}
}
}
#pragma mark - Object Public Function
- (void) initWithConfiguration
{
EncodingSession = nil;
aQueue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
frameCount = 0;
sps = NULL;
pps = NULL;
}
- (void)initEncode:(int)width height:(int)height
{
dispatch_sync(aQueue, ^{
OSStatus status = VTCompressionSessionCreate(NULL, width, height, kCMVideoCodecType_H264, NULL, NULL, NULL, didCompressH264, (__bridge void *)(self), &EncodingSession);
if (status != 0)
{
NSLog(@"Error by VTCompressionSessionCreate ");
return ;
}
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_RealTime, kCFBooleanTrue);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_ProfileLevel, kVTProfileLevel_H264_Baseline_4_1);
SInt32 bitRate = width*height*50; // the higher the bitrate, the better the quality and the larger each frame
CFNumberRef ref = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &bitRate);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_AverageBitRate, ref);
CFRelease(ref);
int frameInterval = 10; // keyframe interval: the lower it is, the better the quality and the larger the data
CFNumberRef frameIntervalRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &frameInterval);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_MaxKeyFrameInterval,frameIntervalRef);
CFRelease(frameIntervalRef);
VTCompressionSessionPrepareToEncodeFrames(EncodingSession);
});
}
- (void)encode:(CMSampleBufferRef )sampleBuffer
{
if (EncodingSession == NULL)
{
return;
}
dispatch_sync(aQueue, ^{
frameCount++;
CVImageBufferRef imageBuffer = (CVImageBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);
CMTime presentationTimeStamp = CMTimeMake(frameCount, 1000);
VTEncodeInfoFlags flags;
OSStatus statusCode = VTCompressionSessionEncodeFrame(EncodingSession,
imageBuffer,
presentationTimeStamp,
kCMTimeInvalid,
NULL, NULL, &flags);
if (statusCode != noErr)
{
if (EncodingSession != NULL)
{
VTCompressionSessionInvalidate(EncodingSession);
CFRelease(EncodingSession);
EncodingSession = NULL;
return;
}
}
});
}
@end
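The encoder likewise never tears its compression session down. If you need to stop encoding cleanly (for example when the view controller goes away), something like the following could be added to JJH264Encoder; again, this is a sketch and not part of the original code.
// Hypothetical teardown for JJH264Encoder (not in the original post).
- (void)endEncode
{
    if (EncodingSession == NULL) {
        return;
    }
    // Flush any frames still in flight, then invalidate and release the session.
    VTCompressionSessionCompleteFrames(EncodingSession, kCMTimeInvalid);
    VTCompressionSessionInvalidate(EncodingSession);
    CFRelease(EncodingSession);
    EncodingSession = NULL;
}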
Result
Here is what the demo looks like when running.
Afterword
To be continued~~~