Merging four NV12 (YUV420SP) frames into a single NV12 frame
The idea behind merging four NV12 (YUV420SP) frames into one NV12 frame follows directly from the NV12 memory layout: the Y rows and the interleaved UV rows of each input frame are copied into the corresponding position (quadrant) of the single output frame.
To call it from outside, first call MergeStart, then feed frames with PutMergeData, and finally call MergeStop. The merged image is written to gYuvMergedData and is still in NV12 format; if another thread consumes that buffer, it is best to add a mutex around it as well, so a reader never sees a frame that is only partially written.
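As a quick sanity check of the offsets used below, here is a minimal sketch of the layout math for a 2x2 NV12 mosaic (the helper name GetMergeOffsets is illustrative and not part of the listing). Each input tile is W x H, so the mosaic is 2W x 2H: its Y plane takes 4*W*H bytes and the interleaved UV plane starts right after it with half the number of rows.

#include <stddef.h>

typedef struct { size_t yStart; size_t uvStart; } MergeOffsets;

/* Where tile 'index' (0..3, row-major: top-left, top-right, bottom-left,
 * bottom-right) starts inside the mosaic buffer. */
static MergeOffsets GetMergeOffsets(int index, int w, int h) {
    MergeOffsets o;
    int col = index % 2;                       /* 0 = left,  1 = right  */
    int row = index / 2;                       /* 0 = top,   1 = bottom */
    size_t stride = (size_t)w * 2;             /* mosaic row is two tiles wide */
    size_t ySize  = (size_t)w * h * 4;         /* Y plane of the 2W x 2H image */
    o.yStart  = (size_t)row * stride * h + (size_t)col * w;
    o.uvStart = ySize + (size_t)row * stride * (h / 2) + (size_t)col * w;
    return o;
}
/* Copying a tile then means: memcpy w bytes per row, h rows for Y and h/2
 * rows for UV, advancing by 'stride' in the mosaic for each source row.
 * For index 0..3 with w=1280, h=720 this reproduces the yMergedStart /
 * uvMergedStart values used in thread_merge below. */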
#include <assert.h>
#include <atomic>
#include <fcntl.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#define CAMERA_WIDTH 1280
#define CAMERA_HEIGHT 720
#define MAX_MERGE_NUM 4

extern void MergeDraw(unsigned char *yuvMergedData);
extern const char *TAG;
// Note: logE/logI are assumed to come from the project's logging utilities (header not shown).

std::atomic<bool> gIsMergeFlag(true);   // cleared by MergeStop() to end the merge thread
int gMergeInitCameraNum = 0;            // number of cameras registered via MergeStart()
pthread_t gMergeThread = 0;
unsigned char *gYuvMergedData = NULL;   // merged 2560x1440 NV12 frame (4 * W * H * 3/2 bytes)
typedef struct _MERGEDATA {
    bool isUsed;
    uint32_t cameraId;
    unsigned char *yuvData;             // one 1280x720 NV12 frame from this camera
    pthread_mutex_t mutex;              // protects yuvData against concurrent read/write
} MERGEDATA;
MERGEDATA gMergeData[MAX_MERGE_NUM];
// Find the slot index registered for cameraId, or -1 if none.
int MergeDataGetIndex(uint32_t cameraId) {
    for (int i = 0; i < MAX_MERGE_NUM; i++) {
        if (gMergeData[i].isUsed && gMergeData[i].cameraId == cameraId)
            return i;
    }
    logE(TAG, "MergeDataGetIndex: Can't get camera index, cameraId:%u", cameraId);
    return -1;
}
// Register cameraId in a free slot; returns the slot index,
// -2 if the cameraId is already registered, -1 on error.
int MergeDataInsert(uint32_t cameraId) {
    for (int i = 0; i < MAX_MERGE_NUM; i++) {
        if (gMergeData[i].isUsed && gMergeData[i].cameraId == cameraId) {
            logE(TAG, "MergeDataInsert: cameraId already exists:%u", cameraId);
            return -2;
        }
    }
    for (int i = 0; i < MAX_MERGE_NUM; i++) {
        if (!gMergeData[i].isUsed) {
            if (pthread_mutex_init(&(gMergeData[i].mutex), NULL) != 0) { // init fail
                logE(TAG, "MergeDataInsert: pthread_mutex_init error.");
                return -1;
            }
            gMergeData[i].isUsed = true;    // mark the slot used only after the mutex is ready
            return i;
        }
    }
    logE(TAG, "MergeDataInsert: gMergeData is full.");
    return -1;
}
// Unregister cameraId: free its frame buffer and destroy its mutex.
int MergeDataDelete(uint32_t cameraId) {
    for (int i = 0; i < MAX_MERGE_NUM; i++) {
        if (gMergeData[i].isUsed && gMergeData[i].cameraId == cameraId) {
            gMergeData[i].cameraId = 0;
            free(gMergeData[i].yuvData);
            gMergeData[i].yuvData = NULL;
            pthread_mutex_destroy(&(gMergeData[i].mutex));
            gMergeData[i].isUsed = false;
            return i;
        }
    }
    logE(TAG, "MergeDataDelete: Can't get camera index, cameraId:%u", cameraId);
    return -1;
}
// The merge has to run in its own thread: every ~33 ms it copies the four
// camera frames into their quadrants of the 2560x1440 NV12 mosaic.
void *thread_merge(void *unused) {
    logI(TAG, "Enter thread_merge");
    // merge loop
    while (gIsMergeFlag) {
        uint64_t yMergedStart = 0;                                // quadrant 0: top-left
        uint64_t uvMergedStart = CAMERA_WIDTH*CAMERA_HEIGHT*4;    // UV plane starts after the 2W x 2H Y plane
        for (int cameraDataIndex = 0; cameraDataIndex < MAX_MERGE_NUM; cameraDataIndex++) {
            //logI(TAG, "thread_merge: start merge, cameraDataIndex: %u", cameraDataIndex);
            if (pthread_mutex_lock(&(gMergeData[cameraDataIndex].mutex)) != 0) {
                logE(TAG, "thread_merge: pthread_mutex_lock error.");
            }
            // Y plane: copy one 1280-byte row per line; the mosaic row stride is 2*CAMERA_WIDTH.
            for (int line = 0; line < CAMERA_HEIGHT; line++) {
                memcpy(gYuvMergedData + yMergedStart + CAMERA_WIDTH*2*line,
                       &(gMergeData[cameraDataIndex].yuvData[CAMERA_WIDTH*line]), CAMERA_WIDTH);
            }
            // Interleaved UV plane: half as many rows, same stride.
            for (int line = 0; line < CAMERA_HEIGHT/2; line++) {
                memcpy(gYuvMergedData + uvMergedStart + CAMERA_WIDTH*2*line,
                       &(gMergeData[cameraDataIndex].yuvData[CAMERA_WIDTH*CAMERA_HEIGHT + CAMERA_WIDTH*line]), CAMERA_WIDTH);
            }
            pthread_mutex_unlock(&(gMergeData[cameraDataIndex].mutex));
            // Set the start offsets for the next quadrant.
            switch (cameraDataIndex) {
            case 0: // next: top-right
                yMergedStart = CAMERA_WIDTH;
                uvMergedStart = CAMERA_WIDTH*CAMERA_HEIGHT*4 + CAMERA_WIDTH;
                break;
            case 1: // next: bottom-left
                yMergedStart = CAMERA_WIDTH*CAMERA_HEIGHT*2;
                uvMergedStart = CAMERA_WIDTH*CAMERA_HEIGHT*4 + CAMERA_WIDTH*CAMERA_HEIGHT;
                break;
            case 2: // next: bottom-right
                yMergedStart = CAMERA_WIDTH*CAMERA_HEIGHT*2 + CAMERA_WIDTH;
                uvMergedStart = CAMERA_WIDTH*CAMERA_HEIGHT*4 + CAMERA_WIDTH*CAMERA_HEIGHT + CAMERA_WIDTH;
                break;
            }
        }
        //logI(TAG, "thread_merge: start draw.");
        MergeDraw(gYuvMergedData);  // for testing: draw the mosaic onto a Surface
        usleep(1000 * 33);          // ~30 frames/second
    }
    // Clean up once the merge has been stopped.
    for (int cameraDataIndex = 0; cameraDataIndex < MAX_MERGE_NUM; cameraDataIndex++) {
        MergeDataDelete(gMergeData[cameraDataIndex].cameraId);
    }
    free(gYuvMergedData);
    gYuvMergedData = NULL;
    gMergeInitCameraNum = 0;
    gIsMergeFlag = true;            // reset so the merge can be started again
    logI(TAG, "Exit thread_merge.");
    return NULL;
}
// Copy one NV12 frame (1280*720*3/2 bytes) from cameraID into its slot.
int PutMergeData(uint32_t cameraID, unsigned char *cameraData) {
    //logI(TAG, "PutMergeData: cameraID: %u", cameraID);
    int cameraDataIndex = MergeDataGetIndex(cameraID);
    if (cameraDataIndex < 0) {
        logE(TAG, "PutMergeData: MergeDataGetIndex error, cameraDataIndex: %d", cameraDataIndex);
        return -1;
    }
    // camera data
    if (pthread_mutex_lock(&(gMergeData[cameraDataIndex].mutex)) != 0) {
        logE(TAG, "PutMergeData: pthread_mutex_lock error, cameraDataIndex: %d", cameraDataIndex);
        return -1;
    }
    memcpy(gMergeData[cameraDataIndex].yuvData, cameraData, CAMERA_WIDTH*CAMERA_HEIGHT*3/2);
    pthread_mutex_unlock(&(gMergeData[cameraDataIndex].mutex));
    return 0;
}
// Register a camera. Once all four cameras have been registered, the merge
// thread is created and starts producing the mosaic.
int MergeStart(uint32_t cameraID) {
    logI(TAG, "MergeStart: cameraID: %u", cameraID);
    int cameraDataIndex = MergeDataInsert(cameraID);
    if (cameraDataIndex < 0) {
        logE(TAG, "MergeStart: MergeDataInsert error, cameraDataIndex: %d", cameraDataIndex);
        return -1;
    }
    logI(TAG, "MergeStart: cameraDataIndex: %d", cameraDataIndex);
    gMergeData[cameraDataIndex].yuvData = (unsigned char *)malloc(CAMERA_WIDTH*CAMERA_HEIGHT*3/2);
    memset(gMergeData[cameraDataIndex].yuvData, 0, CAMERA_WIDTH*CAMERA_HEIGHT*3/2);  // avoid garbage before the first PutMergeData
    gMergeData[cameraDataIndex].cameraId = cameraID;
    gMergeInitCameraNum++;
    if (gMergeInitCameraNum >= MAX_MERGE_NUM) {
        gIsMergeFlag = true;
        gYuvMergedData = (unsigned char *)malloc(4*CAMERA_WIDTH*CAMERA_HEIGHT*3/2);  // buffer for the 2x2 mosaic
        memset(gYuvMergedData, 0, 4*CAMERA_WIDTH*CAMERA_HEIGHT*3/2);
        if (pthread_create(&gMergeThread, NULL, thread_merge, NULL) != 0) {
            logE(TAG, "MergeStart: pthread_create error.");
            for (int i = 0; i < MAX_MERGE_NUM; i++) {
                MergeDataDelete(gMergeData[i].cameraId);
            }
            free(gYuvMergedData);
            gYuvMergedData = NULL;
            return -1;
        }
    }
    return 0;
}
// Stop the merge: signal the thread to exit and wait for it to finish.
int MergeStop() {
    logI(TAG, "MergeStop.");
    gIsMergeFlag = false;
    if (gMergeThread != 0) {
        if (pthread_join(gMergeThread, NULL) != 0) {
            logE(TAG, "MergeStop: pthread_join error.");
            return -1;
        }
        gMergeThread = 0;
    }
    return 0;
}
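For completeness, a minimal sketch of the calling sequence, written as if appended to the file above (GetCameraFrame and the camera IDs are hypothetical placeholders for the real capture source, and frame pacing is omitted):

/* Hypothetical capture call: returns a pointer to the latest 1280*720*3/2-byte
 * NV12 frame of the given camera. Replace with the real frame source. */
extern unsigned char *GetCameraFrame(uint32_t cameraID);

void MergeExample(void) {
    uint32_t ids[MAX_MERGE_NUM] = {0, 1, 2, 3};
    for (int i = 0; i < MAX_MERGE_NUM; i++)
        MergeStart(ids[i]);                     /* the 4th call creates thread_merge */

    for (int frame = 0; frame < 300; frame++) { /* roughly 10 s of frames; pacing omitted */
        for (int i = 0; i < MAX_MERGE_NUM; i++)
            PutMergeData(ids[i], GetCameraFrame(ids[i]));
        /* gYuvMergedData now holds the 2560x1440 NV12 mosaic; guard any reads
         * from other threads with your own mutex, as noted above. */
    }

    MergeStop();                                /* stop and join the merge thread */
}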