The buffer created by CVPixelBufferCreate has 32 more bytes per row than the specified width suggests

I was building a feature that assembles a video from a set of images.
The frames in the generated video came out garbled, and it took a long time to track down the cause: I did not understand CVPixelBufferCreate well enough and mistakenly used my own computed bytes-per-row instead of the row stride the buffer actually uses.
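
A minimal standalone sketch of the difference (my own illustration, not from the project; the 600×600 size matches the code below):

import CoreVideo

// Create a 600x600 32ARGB buffer and compare the naive row size (width * 4)
// with the row stride Core Video actually allocated.
var buffer : CVPixelBuffer? = nil
CVPixelBufferCreate(kCFAllocatorDefault, 600, 600, kCVPixelFormatType_32ARGB, nil, &buffer)
if let buffer = buffer {
    let naive = 600 * 4                              // 2400
    let actual = CVPixelBufferGetBytesPerRow(buffer) // 2432 in the run described below
    print("naive=\(naive) actual=\(actual) padding=\(actual - naive)") // padding = 32 bytes per row
}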

The function that converts a UIImage into a pixel buffer:

static func pixeleBufferFromUIImage(image: UIImage) -> CVPixelBuffer? {
    let width = 600  // Int(image.size.width)
    let height = 600 // Int(image.size.height)
    let options : Dictionary<String, AnyObject> =
        [kCVPixelBufferCGImageCompatibilityKey as String : NSNumber(bool: true),
         kCVPixelBufferCGBitmapContextCompatibilityKey as String : NSNumber(bool: true)]
    //print("kCVPixelFormatType_32ARGB \(kCVPixelFormatType_32ARGB) _ \(CGImageAlphaInfo.NoneSkipFirst.rawValue)")

    var pxBuffer : CVPixelBuffer? = nil
    let status = CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_32ARGB, options, &pxBuffer)
    guard status == kCVReturnSuccess && pxBuffer != nil else {
        return nil
    }

    CVPixelBufferLockBaseAddress(pxBuffer!, 0)
    let pxData = CVPixelBufferGetBaseAddress(pxBuffer!)

    let colorSpace = CGColorSpaceCreateDeviceRGB()
    // Prints 2432, not the naive 600 * 4 = 2400: Core Video pads each row for alignment.
    print("CVPixelBufferGetBytesPerRow(pxBuffer!)=\(CVPixelBufferGetBytesPerRow(pxBuffer!))")
    // Passing (width * 4) here instead of CVPixelBufferGetBytesPerRow(pxBuffer!) makes the final video come out garbled.
    let context = CGBitmapContextCreate(pxData, width, height, 8, CVPixelBufferGetBytesPerRow(pxBuffer!), colorSpace, CGImageAlphaInfo.NoneSkipFirst.rawValue)
    CGContextClearRect(context, CGRect(x: 0, y: 0, width: width, height: height))
    CGContextDrawImage(context!, CGRect(x: 0, y: 0, width: width, height: height), image.CGImage)
    CVPixelBufferUnlockBaseAddress(pxBuffer!, 0)

    // The trap: a bytes-per-row you compute yourself is smaller than the real one, yet the image you
    // read back from this context still looks correct, so the bug stays hidden until the buffer is
    // consumed downstream.
    return pxBuffer!
}
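
To make the stride issue concrete, here is a small sketch (my own illustration, not part of the original code) of how a pixel's byte offset has to be computed once the buffer is locked; pxBuffer stands for a buffer like the one returned above:

import CoreVideo

func printPixelOffsets(pxBuffer: CVPixelBuffer, col: Int, row: Int) {
    CVPixelBufferLockBaseAddress(pxBuffer, 0)
    let bytesPerRow = CVPixelBufferGetBytesPerRow(pxBuffer) // 2432 for a 600-wide 32ARGB buffer
    let width = CVPixelBufferGetWidth(pxBuffer)             // 600
    // Real layout: each row starts bytesPerRow bytes after the previous one.
    let correctOffset = row * bytesPerRow + col * 4
    // Naive layout: assumes tightly packed rows, so it drifts 32 more bytes off target on every row.
    let wrongOffset = row * width * 4 + col * 4
    print("correct=\(correctOffset) wrong=\(wrongOffset)")
    CVPixelBufferUnlockBaseAddress(pxBuffer, 0)
}

Reading or writing raw bytes with the naive offset is exactly what produces the garbled frames described above.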

Example code that writes the buffers into a video:

static public func images2Video(lstImages : [UIImage], outputUrl : NSURL) throws -> Bool {
    let videoWriter = try AVAssetWriter(URL: outputUrl, fileType: AVFileTypeMPEG4)
    let outputSetting = [AVVideoCodecKey : AVVideoCodecH264,
                         AVVideoWidthKey : NSNumber(int: 600),
                         AVVideoHeightKey : NSNumber(int: 600)]

    if !videoWriter.canApplyOutputSettings(outputSetting, forMediaType: AVMediaTypeVideo) {
        print("output settings not supported.")
        return false
    }

    let videoWriteInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSetting)
    let sourcePixelBufferAttributesDictionary = [
        kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
        kCVPixelBufferWidthKey as String : NSNumber(int: 600),
        kCVPixelBufferHeightKey as String : NSNumber(int: 600),
    ]

    let adapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriteInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
    videoWriter.addInput(videoWriteInput)
    videoWriter.startWriting()
    videoWriter.startSessionAtSourceTime(kCMTimeZero)

    var idxFrame = 0
    while idxFrame < lstImages.count {
        if videoWriteInput.readyForMoreMediaData {
            print("ready to append the next picture")
            // Show each image for 0.5 s.
            let frameTime = CMTime(seconds: 0.5, preferredTimescale: 600)
            let lastTime = CMTime(seconds: Double(idxFrame) * 0.5, preferredTimescale: 600)
            var presentTime = CMTimeAdd(lastTime, frameTime)
            if idxFrame == 0 {
                presentTime = CMTime(value: 0, timescale: 600)
            }

            let buffer = pixeleBufferFromUIImage(lstImages[idxFrame])
            // The adaptor reads the data according to the buffer's own attributes (including bytes per row),
            // so if the function above used a wrong bytes-per-row, the frames written here come out garbled.
            adapter.appendPixelBuffer(buffer!, withPresentationTime: presentTime)
            idxFrame += 1
        }
    }
    videoWriteInput.markAsFinished()
    videoWriter.finishWritingWithCompletionHandler {
        if videoWriter.status == .Completed {
            print("success")
        } else {
            print("error=\(videoWriter.error)")
        }
    }
    return true
}
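
A hedged usage sketch (the type name VideoBuilder, the image names, and the output path are placeholders, not from the original project):

import UIKit

// Hypothetical call site: write three images to a temporary MP4 file.
let images = [UIImage(named: "frame1")!, UIImage(named: "frame2")!, UIImage(named: "frame3")!]
let outputUrl = NSURL(fileURLWithPath: NSTemporaryDirectory() + "out.mp4")
do {
    let started = try VideoBuilder.images2Video(images, outputUrl: outputUrl)
    print("writing started: \(started)")
} catch {
    print("writer error: \(error)")
}

Note that finishWritingWithCompletionHandler is asynchronous, so images2Video returns before the file is finalized; wait for the completion handler (or check the writer's status) before using the output file.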

Reference code:
RRVideoKit
UIImageToVideo