FFmpeg transcoding of a local file (Part 1)
Goal: take a local file as input and transcode it to another local file. This covers mux-layer remuxing, codec-layer transcoding, video pixel-format conversion, audio resampling and so on; the individual features are pointed out in the comments. Note: the comments are very important.
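The post only shows ffmpeg.h and ffmpeg.cpp below; the driver is not included. Here is a minimal sketch of how the entry points are meant to be called, assuming a separate main.cpp (hypothetical, not part of the original code) and the old FFmpeg 2.x API used throughout this post:
//main.cpp -- hypothetical driver
#include "ffmpeg.h"
int main()
{
av_register_all(); //register all formats and codecs (required by the old API used here)
if (init_demux((char *)INPUTURL, &icodec) != 1) //open the input and find the stream info
{
return -1;
}
if (init_mux() != 1) //create the output context, add streams, open codecs, write the header
{
return -1;
}
transcode(); //the read / decode / convert / encode / write loop
uinit_mux(); //write the trailer and release the output side
uinit_demux(); //release the input side
return 0;
}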
#ifndef __FFMPEG_H__
#define __FFMPEG_H__
#include "info.h"
extern "C"
{
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "libavutil/avutil.h"
#include "libavutil/mathematics.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/samplefmt.h"
#include "libavdevice/avdevice.h" //摄像头所用
#include "libavfilter/avfilter.h"
#include "libavutil/error.h"
#include "libavutil/mathematics.h"
#include "libavutil/time.h"
#include "libavutil/fifo.h"
#include "libavutil/audio_fifo.h" //这里是做分片时候重採样编码音频用的
#include "inttypes.h"
#include "stdint.h"
};
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"avdevice.lib")
#pragma comment(lib,"avfilter.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"postproc.lib")
#pragma comment(lib,"swresample.lib")
#pragma comment(lib,"swscale.lib")
//#define INPUTURL "../in_stream/22.flv"
//#define INPUTURL "../in_stream/闪电侠.The.Flash.S01E01.中英字幕.HDTVrip.624X352.mp4"
//#define INPUTURL "../in_stream/33.ts"
//#define INPUTURL "../in_stream/22mp4.mp4"
//#define INPUTURL "../in_stream/EYED0081.MOV"
//#define INPUTURL "../in_stream/李荣浩 - 李白.mp3"
//#define INPUTURL "../in_stream/avier1.mp4"
//#define INPUTURL "../in_stream/分歧者2预告片.mp4"
//#define INPUTURL "../in_stream/Class8简单介绍.m4v"
//#define INPUTURL "../in_stream/22.ts"
//#define INPUTURL "../in_stream/44.mp3"
//#define INPUTURL "../in_stream/ceshi.mp4"
//#define INPUTURL "../in_stream/33.mp4"
#define INPUTURL "../in_stream/father.avi"
//#define INPUTURL "../in_stream/西海情歌.wav"
//#define INPUTURL "../in_stream/Furious_7_2015_International_Trailer_2_5.1-1080p-HDTN.mp4"
//#define INPUTURL "../in_stream/Wildlife.wmv"
//#define INPUTURL "../in_stream/单身男女2.HD1280超清国语版.mp4"
//#define INPUTURL "rtmp://221.228.193.50:1935/live/teststream1"
#define OUTPUTURL "../out_stream/father1111.mp4"
//#define OUTPUTURL "rtmp://221.228.193.50:1935/live/zwg"
//#define OUTPUTURL "rtmp://221.228.193.50:1935/live/zwg"
enum AVSampleFormat_t
{
AV_SAMPLE_FMT_NONE_t = -1,
AV_SAMPLE_FMT_U8_t, ///< unsigned 8 bits
AV_SAMPLE_FMT_S16_t, ///< signed 16 bits
AV_SAMPLE_FMT_S32_t, ///< signed 32 bits
AV_SAMPLE_FMT_FLT_t, ///< float
AV_SAMPLE_FMT_DBL_t, ///< double
AV_SAMPLE_FMT_U8P_t, ///< unsigned 8 bits, planar
AV_SAMPLE_FMT_S16P_t, ///< signed 16 bits, planar
AV_SAMPLE_FMT_S32P_t, ///< signed 32 bits, planar
AV_SAMPLE_FMT_FLTP_t, ///< float, planar
AV_SAMPLE_FMT_DBLP_t, ///< double, planar
AV_SAMPLE_FMT_NB_t ///< Number of sample formats. DO NOT USE if linking dynamically
};
/************************************************************************/
/************************************************************************/
//whether to use AVAudioFifo for audio; otherwise a global AVFifoBuffer is used (two alternative approaches)
//#define AUDIO_FIFO
/************************************************************************/
/************************************************************************/
//video param
extern int m_dwWidth;
extern int m_dwHeight;
extern double m_dbFrameRate; //frame rate
extern AVCodecID video_codecID;
extern AVPixelFormat video_pixelfromat;
extern int gop_size;
extern int max_b_frame;
extern int thread_count; //number of CPU cores to use
//audio param
extern int m_dwChannelCount; //channels
extern AVSampleFormat_t m_dwBitsPerSample; //sample format
extern int m_dwFrequency; //sample rate
extern AVCodecID audio_codecID;
extern int audio_frame_size;
extern int m_audiomuxtimebasetrue; //whether the audio mux-layer time base is valid
extern AVFifoBuffer * m_fifo; //holds PCM data
extern AVAudioFifo * m_audiofifo; //holds PCM data (audio FIFO)
extern int64_t m_first_audio_pts; //pts of the first audio frame
extern int m_is_first_audio_pts; //whether the first audio pts has been recorded
#define AUDIO_ID 0 //stream index in the output packets; if the audio stream is added first, audio is 0 and video is 1, otherwise the reverse (depends on the add_out_stream order)
#define VIDEO_ID 1
extern int nRet; //status flag
extern AVFormatContext* icodec; //input format context
extern AVFormatContext* ocodec ; //output format context
extern char szError[256]; //error string buffer
extern AVStream* ovideo_st;
extern AVStream* oaudio_st;
extern int video_stream_idx;
extern int audio_stream_idx;
extern AVCodec *audio_codec;
extern AVCodec *video_codec;
extern AVPacket pkt;
extern AVBitStreamFilterContext * vbsf_aac_adtstoasc; //aac_adtstoasc bitstream filter (ADTS to ASC)
static struct SwsContext * img_convert_ctx_video = NULL;
static int sws_flags = SWS_BICUBIC; //interpolation algorithm: bicubic
int init_demux(char * Filename,AVFormatContext ** iframe_c);
int init_mux();
int uinit_demux();
int uinit_mux();
//for mux
AVStream * add_out_stream(AVFormatContext* output_format_context,AVMediaType codec_type_t);
//for codec
AVStream * add_out_stream2(AVFormatContext* output_format_context,AVMediaType codec_type_t,AVCodec **codec);
int init_decode(int stream_type);
int init_code(int stream_type);
int uinit_decode(int stream_type);
int uinit_code(int stream_type);
int perform_decode(int stream_type,AVFrame * picture);
int perform_code(int stream_type,AVFrame * picture); //uses AVFifoBuffer
int perform_code2(int stream_type,AVFrame * picture); //uses AVAudioFifo
void perform_yuv_conversion(AVFrame * pinframe,AVFrame * poutframe);
SwrContext * init_pcm_resample(AVFrame *in_frame, AVFrame *out_frame);
void uinit_pcm_resample(AVFrame * poutframe,SwrContext * swr_ctx);
int preform_pcm_resample(SwrContext * pSwrCtx,AVFrame *in_frame, AVFrame *out_frame);
int audio_support(AVCodec * pCodec,int *channel,int * playout,int *samplePerSec,AVSampleFormat_t *sample_fmt);
int video_support(AVCodec * pCodec,AVPixelFormat * video_pixelfromat);
int transcode();
void write_frame(AVFormatContext *ocodec,int ID,AVPacket pkt_t); //writes the packet to the output file using the passed buffer and size
#endif
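That is the complete header, ffmpeg.h. The implementation file, ffmpeg.cpp, follows.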
#include "ffmpeg.h"
int nRet = 0;
AVFormatContext* icodec = NULL;
AVFormatContext* ocodec = NULL;
char szError[256];
AVStream * ovideo_st = NULL;
AVStream * oaudio_st = NULL;
int video_stream_idx = -1;
int audio_stream_idx = -1;
AVCodec *audio_codec = NULL;
AVCodec *video_codec = NULL;
AVPacket pkt;
AVBitStreamFilterContext * vbsf_aac_adtstoasc = NULL;
//video param
int m_dwWidth = 640;
int m_dwHeight = 480;
double m_dbFrameRate = 23; //frame rate
AVCodecID video_codecID = AV_CODEC_ID_H264;
AVPixelFormat video_pixelfromat = AV_PIX_FMT_YUV420P;
int bit_rate = 400000;
int gop_size = 12;
int max_b_frame = 2;
int thread_count = 2;
//audio param
int m_dwChannelCount = 2; //channels
int m_dwFrequency = 48000; //sample rate
AVSampleFormat_t m_dwBitsPerSample = AV_SAMPLE_FMT_S16_t; //sample format
int m_audiomuxtimebasetrue = 1; //whether the audio mux-layer time base is valid
//aac
AVCodecID audio_codecID = AV_CODEC_ID_AAC;
int audio_frame_size = 1024;
//mp3
//AVCodecID audio_codecID = AV_CODEC_ID_MP3;
//int audio_frame_size = 1152;
AVFifoBuffer * m_fifo = NULL;
AVAudioFifo * m_audiofifo = NULL;
int64_t m_first_audio_pts = 0;
int m_is_first_audio_pts = 0;
int init_demux(char * Filename,AVFormatContext ** iframe_c)
{
int i = 0;
nRet = avformat_open_input(iframe_c, Filename,NULL, NULL);
if (nRet != 0)
{
av_strerror(nRet, szError, 256);
printf(szError);
printf("\n");
printf("Call avformat_open_input function failed!\n");
return 0;
}
if (avformat_find_stream_info(*iframe_c,NULL) < 0)
{
printf("Call avformat_find_stream_info function failed!\n");
return 0;
}
//dump the input file info
av_dump_format(*iframe_c, -1, Filename, 0);
//record the video and audio stream indices from the input
for (i = 0; i < (*iframe_c)->nb_streams; i++)
{
if ((*iframe_c)->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
double FrameRate = (*iframe_c)->streams[i]->r_frame_rate.num /(double)(*iframe_c)->streams[i]->r_frame_rate.den;
m_dbFrameRate =(int)(FrameRate + 0.5);
video_stream_idx = i;
}
else if ((*iframe_c)->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
audio_stream_idx = i;
if(icodec->streams[audio_stream_idx]->time_base.den == 0 ||
icodec->streams[audio_stream_idx]->time_base.num == 0 ||
icodec->streams[audio_stream_idx]->codec->sample_rate == 0)
{
m_audiomuxtimebasetrue = 0;
}
}
}
return 1;
}
int init_mux()
{
int i = 0;
int ret = 0;
/* allocate the output media context */
avformat_alloc_output_context2(&ocodec, NULL,NULL, OUTPUTURL);
if (!ocodec)
{
return getchar();
}
AVOutputFormat* ofmt = NULL;
ofmt = ocodec->oformat;
/* open the output file, if needed */
if (!(ofmt->flags & AVFMT_NOFILE))
{
if (avio_open(&ocodec->pb, OUTPUTURL, AVIO_FLAG_WRITE) < 0)
{
printf("Could not open '%s'\n", OUTPUTURL);
return getchar();
}
}
//the order in which streams are added here must match AUDIO_ID/VIDEO_ID
//add the audio stream to the output context
if(audio_stream_idx != -1)//if an audio stream exists
{
ofmt->audio_codec = audio_codecID;
//if the audio needs transcoding
if(audio_codecID != icodec->streams[audio_stream_idx]->codec->codec_id ||
1 != icodec->streams[audio_stream_idx]->codec->sample_fmt)
{
oaudio_st = add_out_stream2(ocodec, AVMEDIA_TYPE_AUDIO,&audio_codec);
}
else
{
oaudio_st = add_out_stream(ocodec, AVMEDIA_TYPE_AUDIO);
}
if ((strstr(ocodec->oformat->name, "flv") != NULL) ||
(strstr(ocodec->oformat->name, "mp4") != NULL) ||
(strstr(ocodec->oformat->name, "mov") != NULL) ||
(strstr(ocodec->oformat->name, "3gp") != NULL))
{
if (oaudio_st->codec->codec_id == AV_CODEC_ID_AAC)
{
vbsf_aac_adtstoasc = av_bitstream_filter_init("aac_adtstoasc");
if(vbsf_aac_adtstoasc == NULL)
{
return -1;
}
}
}
}
//add the video stream to the output context
if (video_stream_idx != -1)//if a video stream exists
{
ofmt->video_codec = video_codecID;
//if the video needs transcoding
if(bit_rate != icodec->streams[video_stream_idx]->codec->bit_rate ||
m_dwWidth != icodec->streams[video_stream_idx]->codec->width ||
m_dwHeight != icodec->streams[video_stream_idx]->codec->height ||
video_codecID != icodec->streams[video_stream_idx]->codec->codec_id ||
m_dbFrameRate != av_q2d(icodec->streams[video_stream_idx]->r_frame_rate))
{
ovideo_st = add_out_stream2(ocodec, AVMEDIA_TYPE_VIDEO,&video_codec);
}
else
{
ovideo_st = add_out_stream(ocodec,AVMEDIA_TYPE_VIDEO);
}
}
av_dump_format(ocodec, 0, OUTPUTURL, 1);
if (video_stream_idx != -1)//if a video stream exists
{
//if the video needs transcoding
if(bit_rate != icodec->streams[video_stream_idx]->codec->bit_rate ||
m_dwWidth != icodec->streams[video_stream_idx]->codec->width ||
m_dwHeight != icodec->streams[video_stream_idx]->codec->height ||
video_codecID != icodec->streams[video_stream_idx]->codec->codec_id ||
m_dbFrameRate != av_q2d(icodec->streams[video_stream_idx]->r_frame_rate))
{
//initialize the decoder
ret = init_decode(VIDEO_ID);
//initialize the encoder
ret = init_code(VIDEO_ID);
}
}
//if the audio needs transcoding
if(audio_stream_idx != -1)//if an audio stream exists
{
if(audio_codecID != icodec->streams[audio_stream_idx]->codec->codec_id ||
1 != icodec->streams[audio_stream_idx]->codec->sample_fmt)
{
//initialize the decoder
ret = init_decode(AUDIO_ID);
//initialize the encoder
ret = init_code(AUDIO_ID);
}
}
ret = avformat_write_header(ocodec, NULL);
if (ret != 0)
{
printf("Call avformat_write_header function failed.\n");
return 0;
}
return 1;
}
int uinit_demux()
{
/* free the stream */
av_free(icodec);
return 1;
}
int uinit_mux()
{
int i = 0;
nRet = av_write_trailer(ocodec);
if (nRet < 0)
{
av_strerror(nRet, szError, 256);
printf(szError);
printf("\n");
printf("Call av_write_trailer function failed\n");
}
if (vbsf_aac_adtstoasc !=NULL)
{
av_bitstream_filter_close(vbsf_aac_adtstoasc);
vbsf_aac_adtstoasc = NULL;
}
av_dump_format(ocodec, -1, OUTPUTURL, 1);
if (video_stream_idx != -1)//if a video stream exists
{
//if the video was transcoded
if(bit_rate != icodec->streams[video_stream_idx]->codec->bit_rate ||
m_dwWidth != icodec->streams[video_stream_idx]->codec->width ||
m_dwHeight != icodec->streams[video_stream_idx]->codec->height ||
video_codecID != icodec->streams[video_stream_idx]->codec->codec_id ||
m_dbFrameRate != av_q2d(icodec->streams[video_stream_idx]->r_frame_rate))
{
uinit_decode(VIDEO_ID);
uinit_code(VIDEO_ID);
}
}
if(audio_stream_idx != -1)//if an audio stream exists
{
//if the audio was transcoded
if(audio_codecID != icodec->streams[audio_stream_idx]->codec->codec_id ||
1 != icodec->streams[audio_stream_idx]->codec->sample_fmt)
{
uinit_decode(AUDIO_ID);
uinit_code(AUDIO_ID);
}
}
/* Free the streams. */
for (i = 0; i < ocodec->nb_streams; i++)
{
av_freep(&ocodec->streams[i]->codec);
av_freep(&ocodec->streams[i]);
}
if (!(ocodec->oformat->flags & AVFMT_NOFILE))
{
/* Close the output file. */
avio_close(ocodec->pb);
}
av_free(ocodec);
return 1;
}
int init_decode(int stream_type)
{
AVCodec *pcodec = NULL;
AVCodecContext *cctext = NULL;
if (stream_type == AUDIO_ID)
{
cctext = icodec->streams[audio_stream_idx]->codec;
pcodec = avcodec_find_decoder(cctext->codec_id);
if (!pcodec)
{
return -1;
}
}
else if (stream_type == VIDEO_ID)
{
cctext = icodec->streams[video_stream_idx]->codec;
pcodec = avcodec_find_decoder(cctext->codec_id);
if (!pcodec)
{
return -1;
}
}
//open the decoder
nRet = avcodec_open2(cctext, pcodec, NULL);
if (nRet < 0)
{
printf("Could not open decoder\n");
return -1;
}
return 1;
}
int init_code(int stream_type)
{
AVCodecContext *cctext = NULL;
if (stream_type == AUDIO_ID)
{
cctext = oaudio_st->codec;
//open the encoder
nRet = avcodec_open2(cctext, audio_codec, NULL);
if (nRet < 0)
{
printf("Could not open encoder\n");
return 0;
}
}
else if (stream_type == VIDEO_ID)
{
cctext = ovideo_st->codec;
//open the encoder
nRet = avcodec_open2(cctext, video_codec, NULL);
if (nRet < 0)
{
printf("Could not open encoder\n");
return -1;
}
}
return 1;
}
int uinit_decode(int stream_type)
{
AVCodecContext *cctext = NULL;
if (stream_type == AUDIO_ID)
{
cctext = icodec->streams[audio_stream_idx]->codec;
}
else if (stream_type == VIDEO_ID)
{
cctext = icodec->streams[video_stream_idx]->codec;
}
avcodec_close(cctext);
return 1;
}
int uinit_code(int stream_type)
{
AVCodecContext *cctext = NULL;
if (stream_type == AUDIO_ID)
{
cctext = oaudio_st->codec;
}
else if (stream_type == VIDEO_ID)
{
cctext = ovideo_st->codec;
}
avcodec_close(cctext);
return 1;
}
AVStream * add_out_stream(AVFormatContext* output_format_context,AVMediaType codec_type_t)
{
AVStream * in_stream = NULL;
AVStream * output_stream = NULL;
AVCodecContext* output_codec_context = NULL;
output_stream = avformat_new_stream(output_format_context,NULL);
if (!output_stream)
{
return NULL;
}
switch (codec_type_t)
{
case AVMEDIA_TYPE_AUDIO:
in_stream = icodec->streams[audio_stream_idx];
break;
case AVMEDIA_TYPE_VIDEO:
in_stream = icodec->streams[video_stream_idx];
break;
default:
break;
}
output_stream->id = output_format_context->nb_streams - 1;
output_codec_context = output_stream->codec;
output_stream->time_base = in_stream->time_base;
int ret = 0;
ret = avcodec_copy_context(output_stream->codec, in_stream->codec);
if (ret < 0)
{
printf("Failed to copy context from input to output stream codec context\n");
return NULL;
}
//This is very important. For pure remuxing (no transcoding), writing the header fails without it;
//when transcoding, the resulting file has no preview image and writing the header below can also fail. Setting it to 0 regenerates the extradata.
output_codec_context->codec_tag = 0;
//if(! strcmp( output_format_context-> oformat-> name, "mp4" ) ||
//!strcmp (output_format_context ->oformat ->name , "mov" ) ||
//!strcmp (output_format_context ->oformat ->name , "3gp" ) ||
//!strcmp (output_format_context ->oformat ->name , "flv"))
if(AVFMT_GLOBALHEADER & output_format_context->oformat->flags)
{
output_codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
return output_stream;
}
AVStream * add_out_stream2(AVFormatContext* output_format_context,AVMediaType codec_type_t,AVCodec **codec)
{
AVCodecContext* output_codec_context = NULL;
AVStream * in_stream = NULL;
AVStream * output_stream = NULL;
AVCodecID codecID;
switch (codec_type_t)
{
case AVMEDIA_TYPE_AUDIO:
codecID = audio_codecID;
in_stream = icodec->streams[audio_stream_idx];
break;
case AVMEDIA_TYPE_VIDEO:
codecID = video_codecID;
in_stream = icodec->streams[video_stream_idx];
break;
default:
break;
}
/* find the encoder */
*codec = avcodec_find_encoder(codecID);
if (!(*codec))
{
return NULL;
}
output_stream = avformat_new_stream(output_format_context,*codec);
if (!output_stream)
{
return NULL;
}
output_stream->id = output_format_context->nb_streams - 1;
output_codec_context = output_stream->codec;
output_stream->time_base = in_stream->time_base;
switch (codec_type_t)
{
case AVMEDIA_TYPE_AUDIO:
output_codec_context->codec_id = audio_codecID;
output_codec_context->codec_type = codec_type_t;
output_stream->start_time = 0;
output_codec_context->sample_rate = icodec->streams[audio_stream_idx]->codec->sample_rate;//m_dwFrequency;
if(icodec->streams[audio_stream_idx]->codec->channels > 2)
{
output_codec_context->channels = m_dwChannelCount;
output_codec_context->channel_layout = av_get_default_channel_layout(m_dwChannelCount);
}
else
{
output_codec_context->channels = icodec->streams[audio_stream_idx]->codec->channels;
if (icodec->streams[audio_stream_idx]->codec->channel_layout == 0)
{
output_codec_context->channel_layout = av_get_default_channel_layout(icodec->streams[audio_stream_idx]->codec->channels);
}
else
{
output_codec_context->channel_layout = icodec->streams[audio_stream_idx]->codec->channel_layout;
}
}
//some encoders do not support very high bit rates; e.g. WAV is 1411200, more than ten times higher than AAC
output_codec_context->bit_rate = 128000;//icodec->streams[audio_stream_idx]->codec->bit_rate;
output_codec_context->frame_size = audio_frame_size;
output_codec_context->sample_fmt = (AVSampleFormat)m_dwBitsPerSample; //sample format
output_codec_context->block_align = 0;
//check the channel layouts, sample rates and sample formats supported by the encoder
audio_support(*codec,&output_codec_context->channels,
(int *)&output_codec_context->channel_layout,
&output_codec_context->sample_rate,
(AVSampleFormat_t *)&output_codec_context->sample_fmt);
m_dwChannelCount = output_codec_context->channels;
m_dwFrequency = output_codec_context->sample_rate;
m_dwBitsPerSample = (AVSampleFormat_t)output_codec_context->sample_fmt;
break;
case AVMEDIA_TYPE_VIDEO:
AVRational r_frame_rate_t;
r_frame_rate_t.num = 100;
r_frame_rate_t.den = (int)(m_dbFrameRate * 100);
output_codec_context->time_base = in_stream->codec->time_base;
output_stream->time_base = in_stream->time_base;
output_stream->r_frame_rate.num = r_frame_rate_t.den;
output_stream->r_frame_rate.den = r_frame_rate_t.num;
output_codec_context->codec_id = video_codecID;
output_codec_context->codec_type = codec_type_t;
output_stream->start_time = 0;
output_codec_context->pix_fmt = video_pixelfromat;
output_codec_context->width = m_dwWidth;
output_codec_context->height = m_dwHeight;
output_codec_context->bit_rate = bit_rate;
output_codec_context->gop_size = gop_size; /* emit one intra frame every twelve frames at most */;
output_codec_context->max_b_frames = max_b_frame; //maximum number of B-frames
output_codec_context->thread_count = thread_count; //number of threads
output_codec_context->me_range = 16;
output_codec_context->max_qdiff = 4;
output_codec_context->qmin = 20; //qmin/qmax trade quality against encoding speed: larger values mean less output data, faster encoding and lower quality
output_codec_context->qmax = 40; //trades quality against encoding speed
output_codec_context->qcompress = 0.6;
//check the pixel formats supported by the encoder
video_support(*codec,&output_codec_context->pix_fmt);
video_pixelfromat = output_codec_context->pix_fmt;
break;
default:
break;
}
//This is very important. For pure remuxing (no transcoding), writing the header fails without it;
//when transcoding, the resulting file has no preview image and writing the header below can also fail. Setting it to 0 regenerates the extradata.
output_codec_context->codec_tag = 0;
//if(! strcmp( output_format_context-> oformat-> name, "mp4" ) ||
// !strcmp (output_format_context ->oformat ->name , "mov" ) ||
// !strcmp (output_format_context ->oformat ->name , "3gp" ) ||
// !strcmp (output_format_context ->oformat ->name , "flv" ))
if(AVFMT_GLOBALHEADER & output_format_context->oformat->flags)
{
output_codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
return output_stream;
}
int perform_decode(int stream_type,AVFrame * picture)
{
AVCodecContext *cctext = NULL;
int frameFinished = 0 ;
if (stream_type == AUDIO_ID)
{
cctext = icodec->streams[audio_stream_idx]->codec;
avcodec_decode_audio4(cctext,picture,&frameFinished,&pkt);
if(frameFinished)
{
return 0;
}
}
else if (stream_type == VIDEO_ID)
{
cctext = icodec->streams[video_stream_idx]->codec;
avcodec_decode_video2(cctext,picture,&frameFinished,&pkt);
if(frameFinished)
{
return 0;
}
}
return 1;
}
int perform_code(int stream_type,AVFrame * picture)
{
AVCodecContext *cctext = NULL;
AVPacket pkt_t;
av_init_packet(&pkt_t);
pkt_t.data = NULL; // packet data will be allocated by the encoder
pkt_t.size = 0;
int frameFinished = 0 ;
if (stream_type == AUDIO_ID)
{
cctext = oaudio_st->codec;
//if the input and output channels, sample format or sample rate differ, resampling is needed
if(icodec->streams[audio_stream_idx]->codec->sample_fmt != (AVSampleFormat)m_dwBitsPerSample ||
icodec->streams[audio_stream_idx]->codec->channels != m_dwChannelCount ||
icodec->streams[audio_stream_idx]->codec->sample_rate != m_dwFrequency)
{
int64_t pts_t = picture->pts;
int duration_t = 0;
if(m_audiomuxtimebasetrue == 0)
{
duration_t = (double)cctext->frame_size * (icodec->streams[audio_stream_idx]->codec->time_base.den /icodec->streams[audio_stream_idx]->codec->time_base.num)/
icodec->streams[audio_stream_idx]->codec->sample_rate;
}
else
{
duration_t = (double)cctext->frame_size * (icodec->streams[audio_stream_idx]->time_base.den /icodec->streams[audio_stream_idx]->time_base.num)/
icodec->streams[audio_stream_idx]->codec->sample_rate;
}
int frame_bytes = cctext->frame_size * av_get_bytes_per_sample(cctext->sample_fmt)* cctext->channels;
AVFrame * pFrameResample = avcodec_alloc_frame();
uint8_t * readbuff = new uint8_t[frame_bytes];
if(av_sample_fmt_is_planar(cctext->sample_fmt))
{
frame_bytes /= cctext->channels;
}
while (av_fifo_size(m_fifo) >= frame_bytes) //drain the samples that were written but not yet read
{
pFrameResample->nb_samples = cctext->frame_size;
av_fifo_generic_read(m_fifo, readbuff, frame_bytes, NULL);
//planar audio must be handled carefully here
//for planar formats, the buf passed to avcodec_fill_audio_frame is a single plane, but buf_size is the total size of all channels
//for non-planar (interleaved) formats, the buf holds all channels and buf_size is the total size of all channels
if(av_sample_fmt_is_planar(cctext->sample_fmt))
{
avcodec_fill_audio_frame(pFrameResample,cctext->channels,cctext->sample_fmt,readbuff,frame_bytes * cctext->channels,1);
}
else
{
avcodec_fill_audio_frame(pFrameResample,cctext->channels,cctext->sample_fmt,readbuff,frame_bytes,0);
}
if(m_is_first_audio_pts == 0)
{
m_first_audio_pts = pts_t;
m_is_first_audio_pts = 1;
}
pFrameResample->pts = m_first_audio_pts;
m_first_audio_pts += duration_t;
pFrameResample->pts = av_rescale_q_rnd(pFrameResample->pts, icodec->streams[audio_stream_idx]->codec->time_base, oaudio_st->codec->time_base, AV_ROUND_NEAR_INF);
nRet = avcodec_encode_audio2(cctext,&pkt_t,pFrameResample,&frameFinished);
if (nRet>=0 && frameFinished)
{
write_frame(ocodec,AUDIO_ID,pkt_t);
av_free_packet(&pkt_t);
}
}
if (readbuff)
{
delete []readbuff;
readbuff = NULL;
}
if (pFrameResample)
{
av_free(pFrameResample);
pFrameResample = NULL;
}
}
else
{
nRet = avcodec_encode_audio2(cctext,&pkt_t,picture,&frameFinished);
if (nRet>=0 && frameFinished)
{
write_frame(ocodec,AUDIO_ID,pkt_t);
av_free_packet(&pkt_t);
}
}
}
else if (stream_type == VIDEO_ID)
{
cctext = ovideo_st->codec;
//ticks_per_frame is not handled correctly here
//if(icodec->streams[video_stream_idx]->codec->ticks_per_frame != 1)
//{
// AVRational time_base_video_t;
// time_base_video_t.num = icodec->streams[video_stream_idx]->codec->time_base.num;
// time_base_video_t.den = icodec->streams[video_stream_idx]->codec->time_base.den /icodec->streams[video_stream_idx]->codec->ticks_per_frame;
// picture->pts = av_rescale_q_rnd(picture->pts, time_base_video_t, ovideo_st->codec->time_base, AV_ROUND_NEAR_INF);
//}
//else
//{
// picture->pts = av_rescale_q_rnd(picture->pts, icodec->streams[video_stream_idx]->codec->time_base, ovideo_st->codec->time_base, AV_ROUND_NEAR_INF);
//}
picture->pts = av_rescale_q_rnd(picture->pts, icodec->streams[video_stream_idx]->codec->time_base, ovideo_st->codec->time_base, AV_ROUND_NEAR_INF);
avcodec_encode_video2(cctext,&pkt_t,picture,&frameFinished);
picture->pts++;
if (frameFinished)
{
write_frame(ocodec,VIDEO_ID,pkt_t);
av_free_packet(&pkt_t);
}
}
return 1;
}
int perform_code2(int stream_type,AVFrame * picture)
{
AVCodecContext *cctext = NULL;
AVPacket pkt_t;
av_init_packet(&pkt_t);
pkt_t.data = NULL; // packet data will be allocated by the encoder
pkt_t.size = 0;
int frameFinished = 0 ;
if (stream_type == AUDIO_ID)
{
cctext = oaudio_st->codec;
//if the input and output channels, sample format or sample rate differ, resampling is needed
if(icodec->streams[audio_stream_idx]->codec->sample_fmt != (AVSampleFormat)m_dwBitsPerSample ||
icodec->streams[audio_stream_idx]->codec->channels != m_dwChannelCount ||
icodec->streams[audio_stream_idx]->codec->sample_rate != m_dwFrequency)
{
int64_t pts_t = picture->pts;
int duration_t = 0;
if(m_audiomuxtimebasetrue == 0)
{
duration_t = (double)cctext->frame_size * (icodec->streams[audio_stream_idx]->codec->time_base.den /icodec->streams[audio_stream_idx]->codec->time_base.num)/
icodec->streams[audio_stream_idx]->codec->sample_rate;
}
else
{
duration_t = (double)cctext->frame_size * (icodec->streams[audio_stream_idx]->time_base.den /icodec->streams[audio_stream_idx]->time_base.num)/
icodec->streams[audio_stream_idx]->codec->sample_rate;
}
AVFrame * pFrameResample = av_frame_alloc();
pFrameResample->nb_samples = cctext->frame_size;
pFrameResample->channel_layout = cctext->channel_layout;
pFrameResample->channels = cctext->channels;
pFrameResample->format = cctext->sample_fmt;
pFrameResample->sample_rate = cctext->sample_rate;
int error = 0;
if ((error = av_frame_get_buffer(pFrameResample, 0)) < 0)
{
av_frame_free(&pFrameResample);
return error;
}
while (av_audio_fifo_size(m_audiofifo) >= pFrameResample->nb_samples) //drain the samples that were written but not yet read
{
av_audio_fifo_read(m_audiofifo,(void **)pFrameResample->data,pFrameResample->nb_samples);
if(m_is_first_audio_pts == 0)
{
m_first_audio_pts = pts_t;
m_is_first_audio_pts = 1;
}
pFrameResample->pts = m_first_audio_pts;
m_first_audio_pts += duration_t;
pFrameResample->pts = av_rescale_q_rnd(pFrameResample->pts, icodec->streams[audio_stream_idx]->codec->time_base, oaudio_st->codec->time_base, AV_ROUND_NEAR_INF);
nRet = avcodec_encode_audio2(cctext,&pkt_t,pFrameResample,&frameFinished);
if (nRet>=0 && frameFinished)
{
write_frame(ocodec,AUDIO_ID,pkt_t);
av_free_packet(&pkt_t);
}
}
if (pFrameResample)
{
av_frame_free(&pFrameResample);
pFrameResample = NULL;
}
}
else
{
nRet = avcodec_encode_audio2(cctext,&pkt_t,picture,&frameFinished);
if (nRet>=0 && frameFinished)
{
write_frame(ocodec,AUDIO_ID,pkt_t);
av_free_packet(&pkt_t);
}
}
}
else if (stream_type == VIDEO_ID)
{
cctext = ovideo_st->codec;
if(icodec->streams[video_stream_idx]->codec->ticks_per_frame != 1)
{
AVRational time_base_video_t;
time_base_video_t.num = icodec->streams[video_stream_idx]->codec->time_base.num;
time_base_video_t.den = icodec->streams[video_stream_idx]->codec->time_base.den /icodec->streams[video_stream_idx]->codec->ticks_per_frame;
picture->pts = av_rescale_q_rnd(picture->pts, time_base_video_t, ovideo_st->codec->time_base, AV_ROUND_NEAR_INF);
}
else
{
picture->pts = av_rescale_q_rnd(picture->pts, icodec->streams[video_stream_idx]->codec->time_base, ovideo_st->codec->time_base, AV_ROUND_NEAR_INF);
}
avcodec_encode_video2(cctext,&pkt_t,picture,&frameFinished);
picture->pts++;
if (frameFinished)
{
write_frame(ocodec,VIDEO_ID,pkt_t);
av_free_packet(&pkt_t);
}
}
return 1;
}
void perform_yuv_conversion(AVFrame * pinframe,AVFrame * poutframe)
{
//set up the conversion context
if (img_convert_ctx_video == NULL)
{
img_convert_ctx_video = sws_getContext(icodec->streams[video_stream_idx]->codec->width, icodec->streams[video_stream_idx]->codec->height,
icodec->streams[video_stream_idx]->codec->pix_fmt,
m_dwWidth, m_dwHeight,
video_pixelfromat,
sws_flags, NULL, NULL, NULL);
if (img_convert_ctx_video == NULL)
{
printf("Cannot initialize the conversion context\n");
}
}
//perform the conversion
sws_scale(img_convert_ctx_video, pinframe->data, pinframe->linesize,
0, icodec->streams[video_stream_idx]->codec->height, poutframe->data, poutframe->linesize);
poutframe->pkt_pts = pinframe->pkt_pts;
poutframe->pkt_dts = pinframe->pkt_dts;
//pkt_pts and pkt_dts sometimes differ, and pkt_pts here is the pre-encode dts; pass pkt_dts into the AVFrame instead of pkt_pts
//poutframe->pts = poutframe->pkt_pts;
poutframe->pts = pinframe->pkt_dts;
}
SwrContext * init_pcm_resample(AVFrame *in_frame, AVFrame *out_frame)
{
SwrContext * swr_ctx = NULL;
swr_ctx = swr_alloc();
if (!swr_ctx)
{
printf("swr_alloc error \n");
return NULL;
}
AVCodecContext * audio_dec_ctx = icodec->streams[audio_stream_idx]->codec;
AVSampleFormat sample_fmt;
sample_fmt = (AVSampleFormat)m_dwBitsPerSample; //sample format
if (audio_dec_ctx->channel_layout == 0)
{
audio_dec_ctx->channel_layout = av_get_default_channel_layout(icodec->streams[audio_stream_idx]->codec->channels);
}
/* set options */
av_opt_set_int(swr_ctx, "in_channel_layout", audio_dec_ctx->channel_layout, 0);
av_opt_set_int(swr_ctx, "in_sample_rate", audio_dec_ctx->sample_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", audio_dec_ctx->sample_fmt, 0);
if(icodec->streams[audio_stream_idx]->codec->channels > 2)
{
av_opt_set_int(swr_ctx, "out_channel_layout", av_get_default_channel_layout(m_dwChannelCount), 0);
}
else
{
av_opt_set_int(swr_ctx, "out_channel_layout", audio_dec_ctx->channel_layout, 0);
}
av_opt_set_int(swr_ctx, "out_sample_rate", audio_dec_ctx->sample_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", sample_fmt, 0);
swr_init(swr_ctx);
int64_t src_nb_samples = in_frame->nb_samples;
out_frame->nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx,oaudio_st->codec->sample_rate) + src_nb_samples,
oaudio_st->codec->sample_rate, oaudio_st->codec->sample_rate, AV_ROUND_UP);
int ret = av_samples_alloc(out_frame->data, &out_frame->linesize[0],
icodec->streams[audio_stream_idx]->codec->channels, out_frame->nb_samples,oaudio_st->codec->sample_fmt,1);
if (ret < 0)
{
return NULL;
}
#ifdef AUDIO_FIFO
m_audiofifo = av_audio_fifo_alloc(oaudio_st->codec->sample_fmt, oaudio_st->codec->channels,
out_frame->nb_samples);
#else
//initialize the PCM FIFO
int buffersize = av_samples_get_buffer_size(NULL, oaudio_st->codec->channels,
2048, oaudio_st->codec->sample_fmt, 1);
m_fifo = av_fifo_alloc(buffersize);
#endif
return swr_ctx;
}
int preform_pcm_resample(SwrContext * pSwrCtx,AVFrame *in_frame, AVFrame *out_frame)
{
int ret = 0;
if (pSwrCtx != NULL)
{
ret = swr_convert(pSwrCtx, out_frame->data, out_frame->nb_samples,
(const uint8_t**)in_frame->data, in_frame->nb_samples);
if (ret < 0)
{
return -1;
}
//recompute the buffer size of the resampled data
int buffersize = av_samples_get_buffer_size(&out_frame->linesize[0], oaudio_st->codec->channels,
ret, oaudio_st->codec->sample_fmt, 1);
#ifdef AUDIO_FIFO
int sss = av_audio_fifo_size(m_audiofifo);
sss = av_audio_fifo_realloc(m_audiofifo, av_audio_fifo_size(m_audiofifo) + out_frame->nb_samples);
av_audio_fifo_write(m_audiofifo,(void **)out_frame->data,out_frame->nb_samples);
sss = av_audio_fifo_size(m_audiofifo);
#else
int sss = av_fifo_size(m_fifo);
sss = av_fifo_realloc2(m_fifo, av_fifo_size(m_fifo) + out_frame->linesize[0]);
sss = av_fifo_size(m_fifo);
av_fifo_generic_write(m_fifo, out_frame->data[0], out_frame->linesize[0], NULL);
#endif
out_frame->pkt_pts = in_frame->pkt_pts;
out_frame->pkt_dts = in_frame->pkt_dts;
//pkt_pts and pkt_dts sometimes differ, and pkt_pts here is the pre-encode dts; pass pkt_dts into the AVFrame instead of pkt_pts
//out_frame->pts = out_frame->pkt_pts;
out_frame->pts = in_frame->pkt_dts;
}
return 0;
}
void uinit_pcm_resample(AVFrame * poutframe,SwrContext * swr_ctx)
{
if (poutframe)
{
avcodec_free_frame(&poutframe);
poutframe = NULL;
}
if (swr_ctx)
{
swr_free(&swr_ctx);
swr_ctx = NULL;
}
#ifdef AUDIO_FIFO
if(m_audiofifo)
{
av_audio_fifo_free(m_audiofifo);
m_audiofifo = NULL;
}
#else
//tear down the PCM FIFO
if(m_fifo)
{
av_fifo_free(m_fifo);
m_fifo = NULL;
}
#endif
}
int audio_support(AVCodec * pCodec,int *channel,int * playout,int *samplePerSec,AVSampleFormat_t * sample_fmt)
{
//supported channel layouts
if(NULL != pCodec->channel_layouts)
{
uint64_t layout = av_get_default_channel_layout(*channel);
if(0 == layout)
{
return 0;
}
int i = 0;
int j = 0;
while(0 != pCodec->channel_layouts[j])
{
printf("pCodec->channel_layouts[j] : %d\n",pCodec->channel_layouts[j]);
++j;
}
while(0 != pCodec->channel_layouts[i])
{
if(layout == pCodec->channel_layouts[i])
{
break;
}
++i;
}
//not found
if(0 == pCodec->channel_layouts[i])
{
*playout = pCodec->channel_layouts[i-1];
*channel = av_get_channel_layout_nb_channels(*playout);
}
}
//supported sample rates
if(NULL != pCodec->supported_samplerates)
{
int i = 0;
int j = 0;
while(0 != pCodec->supported_samplerates[j])
{
printf("pCodec->supported_samplerates[j] : %d\n",pCodec->supported_samplerates[j]);
++j;
}
while(0 != pCodec->supported_samplerates[i])
{
if(*samplePerSec == pCodec->supported_samplerates[i])
{
break;
}
++i;
}
//not found
if(0 == pCodec->supported_samplerates[i])
{
*samplePerSec = pCodec->supported_samplerates[i-1];
}
}
//supported sample formats
if(NULL != pCodec->sample_fmts)
{
int i = 0;
int j = 0;
while(-1 != pCodec->sample_fmts[j])
{
printf("pCodec->sample_fmts[j] : %d\n",pCodec->sample_fmts[j]);
++j;
}
while(-1 != pCodec->sample_fmts[i])
{
if(*sample_fmt == pCodec->sample_fmts[i])
{
break;
}
++i;
}
//not found
if(-1 == pCodec->sample_fmts[i])
{
*sample_fmt = (AVSampleFormat_t)pCodec->sample_fmts[i-1];
}
}
return 1;
}
int video_support(AVCodec * pCodec,AVPixelFormat * video_pixelfromat)
{
//supported pixel (YUV) formats
if(NULL != pCodec->pix_fmts)
{
int i = 0;
int j = 0;
while(-1 != pCodec->pix_fmts[j])
{
printf("pCodec->pix_fmts[j] : %d\n",pCodec->pix_fmts[j]);
++j;
}
while(-1 != pCodec->pix_fmts[i])
{
if(*video_pixelfromat == pCodec->pix_fmts[i])
{
break;
}
++i;
}
//not found
if(-1 == pCodec->pix_fmts[i])
{
*video_pixelfromat = pCodec->pix_fmts[i-1];
}
}
return 1;
}
void write_frame(AVFormatContext *ocodec,int ID,AVPacket pkt_t)
{
int64_t pts = 0, dts = 0;
int nRet = -1;
if(ID == VIDEO_ID)
{
AVPacket videopacket_t;
av_init_packet(&videopacket_t);
videopacket_t.pts = av_rescale_q_rnd(pkt_t.pts, icodec->streams[video_stream_idx]->time_base, ovideo_st->time_base, AV_ROUND_NEAR_INF);
videopacket_t.dts = av_rescale_q_rnd(pkt_t.dts, icodec->streams[video_stream_idx]->time_base, ovideo_st->time_base, AV_ROUND_NEAR_INF);
videopacket_t.duration = av_rescale_q(pkt_t.duration,icodec->streams[video_stream_idx]->time_base, ovideo_st->time_base);
videopacket_t.flags = pkt_t.flags;
videopacket_t.stream_index = VIDEO_ID; //the add_out_stream order matters here
videopacket_t.data = pkt_t.data;
videopacket_t.size = pkt_t.size;
nRet = av_interleaved_write_frame(ocodec, &videopacket_t);
if (nRet != 0)
{
printf("error av_interleaved_write_frame _ video\n");
}
printf("video\n");
}
else if(ID == AUDIO_ID)
{
AVPacket audiopacket_t;
av_init_packet(&audiopacket_t);
if(m_audiomuxtimebasetrue == 0)
{
audiopacket_t.pts = av_rescale_q_rnd(pkt_t.pts, icodec->streams[audio_stream_idx]->codec->time_base, oaudio_st->time_base, AV_ROUND_NEAR_INF);
audiopacket_t.dts = av_rescale_q_rnd(pkt_t.dts, icodec->streams[audio_stream_idx]->codec->time_base, oaudio_st->time_base, AV_ROUND_NEAR_INF);
audiopacket_t.duration = av_rescale_q(pkt_t.duration,icodec->streams[audio_stream_idx]->codec->time_base, oaudio_st->time_base);
}
else
{
audiopacket_t.pts = av_rescale_q_rnd(pkt_t.pts, icodec->streams[audio_stream_idx]->time_base, oaudio_st->time_base, AV_ROUND_NEAR_INF);
audiopacket_t.dts = av_rescale_q_rnd(pkt_t.dts, icodec->streams[audio_stream_idx]->time_base, oaudio_st->time_base, AV_ROUND_NEAR_INF);
audiopacket_t.duration = av_rescale_q(pkt_t.duration,icodec->streams[audio_stream_idx]->time_base, oaudio_st->time_base);
}
audiopacket_t.flags = pkt_t.flags;
audiopacket_t.stream_index = AUDIO_ID; //the add_out_stream order matters here
audiopacket_t.data = pkt_t.data;
audiopacket_t.size = pkt_t.size;
//apply the bitstream filter
if(! strcmp( ocodec->oformat-> name, "mp4" ) ||
!strcmp (ocodec ->oformat ->name , "mov" ) ||
!strcmp (ocodec ->oformat ->name , "3gp" ) ||
!strcmp (ocodec ->oformat ->name , "flv" ))
{
if (oaudio_st->codec->codec_id == AV_CODEC_ID_AAC)
{
if (vbsf_aac_adtstoasc != NULL)
{
AVPacket filteredPacket = audiopacket_t;
int a = av_bitstream_filter_filter(vbsf_aac_adtstoasc,
oaudio_st->codec, NULL,&filteredPacket.data, &filteredPacket.size,
audiopacket_t.data, audiopacket_t.size, audiopacket_t.flags & AV_PKT_FLAG_KEY);
if (a > 0)
{
av_free_packet(&audiopacket_t);
filteredPacket.destruct = av_destruct_packet;
audiopacket_t = filteredPacket;
}
else if (a == 0)
{
audiopacket_t = filteredPacket;
}
else if (a < 0)
{
fprintf(stderr, "%s failed for stream %d, codec %s",
vbsf_aac_adtstoasc->filter->name,audiopacket_t.stream_index,oaudio_st->codec->codec ? oaudio_st->codec->codec->name : "copy");
av_free_packet(&audiopacket_t);
}
}
}
}
nRet = av_interleaved_write_frame(ocodec, &audiopacket_t);
if (nRet != 0)
{
printf("error av_interleaved_write_frame _ audio\n");
}
printf("audio\n");
}
}
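write_frame above rescales every pts/dts from the input time base into the output stream's time base. For reference, av_rescale_q(a, bq, cq) returns a*bq/cq with rounding, i.e. it re-expresses a tick count given in time base bq in time base cq, and av_rescale_q_rnd is the same with an explicit rounding mode. A small standalone sanity check, assuming it is compiled together with the files above (the time-base values are illustrative only):
//hypothetical helper, not part of the original code
static void rescale_demo(void)
{
AVRational in_tb = {1, 90000}; //e.g. a 90 kHz MPEG-TS style time base (illustrative)
AVRational out_tb = {1, 1000}; //e.g. a millisecond time base (illustrative)
int64_t pts_in = 180000; //2 seconds counted in the input time base
int64_t pts_out = av_rescale_q_rnd(pts_in, in_tb, out_tb, AV_ROUND_NEAR_INF);
printf("%lld -> %lld\n", (long long)pts_in, (long long)pts_out); //prints 180000 -> 2000
}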
int transcode()
{
AVFrame *pinframe = NULL;
AVFrame * pout_video_frame = NULL;
AVFrame * pout_audio_frame = NULL;
SwrContext * swr_ctx = NULL;
int dst_nb_samples = -1;
int resampled_data_size = 0;
//allocate the AVFrames and set their defaults
pinframe = avcodec_alloc_frame();
pout_video_frame = avcodec_alloc_frame();
pout_audio_frame = avcodec_alloc_frame();
pinframe->pts = 0;
pout_video_frame->pts = 0;
pout_audio_frame->pts = 0;
if (pinframe == NULL)
{
printf("avcodec_alloc_frame pinframe error\n");
return 0;
}
//video
if (pout_video_frame == NULL)
{
printf("avcodec_alloc_frame pout_video_frame error\n");
return 0;
}
int Out_size = avpicture_get_size(video_pixelfromat, m_dwWidth,m_dwHeight);
uint8_t * pOutput_buf =( uint8_t *)malloc(Out_size * 3 * sizeof(char)); //allocate the largest buffer needed, enough for any YUV format
avpicture_fill((AVPicture *)pout_video_frame, (unsigned char *)pOutput_buf, video_pixelfromat,m_dwWidth, m_dwHeight); //bind the buffer to the frame
//audio
if (pout_audio_frame == NULL)
{
printf("avcodec_alloc_frame pout_audio_frame error\n");
return 0;
}
avcodec_get_frame_defaults(pout_audio_frame);
//start demuxing
while (1)
{
av_init_packet(&pkt);
if (av_read_frame(icodec, &pkt) < 0)
{
break;
}
//video
if(pkt.stream_index == video_stream_idx)
{
//if the video needs transcoding
if(bit_rate != icodec->streams[video_stream_idx]->codec->bit_rate ||
m_dwWidth != icodec->streams[video_stream_idx]->codec->width ||
m_dwHeight != icodec->streams[video_stream_idx]->codec->height ||
video_codecID != icodec->streams[video_stream_idx]->codec->codec_id ||
m_dbFrameRate != av_q2d(icodec->streams[video_stream_idx]->r_frame_rate))
{
nRet = perform_decode(VIDEO_ID,pinframe);
if (nRet == 0)
{
perform_yuv_conversion(pinframe,pout_video_frame);
#ifdef AUDIO_FIFO
nRet = perform_code2(VIDEO_ID,pout_video_frame);
#else
nRet = perform_code(VIDEO_ID,pout_video_frame);
#endif
}
}
else
{
write_frame(ocodec,VIDEO_ID,pkt);
}
}
//audio
else if (pkt.stream_index == audio_stream_idx)
{
//if the audio needs transcoding
if(audio_codecID != icodec->streams[audio_stream_idx]->codec->codec_id ||
1 != icodec->streams[audio_stream_idx]->codec->sample_fmt)
{
nRet = perform_decode(AUDIO_ID,pinframe);
if (nRet == 0)
{
//if the input and output channels, sample format or sample rate differ, resampling is needed
if(icodec->streams[audio_stream_idx]->codec->sample_fmt != (AVSampleFormat)m_dwBitsPerSample ||
icodec->streams[audio_stream_idx]->codec->channels != m_dwChannelCount ||
icodec->streams[audio_stream_idx]->codec->sample_rate != m_dwFrequency)
{
if (swr_ctx == NULL)
{
swr_ctx = init_pcm_resample(pinframe,pout_audio_frame);
}
preform_pcm_resample(swr_ctx,pinframe,pout_audio_frame);
#ifdef AUDIO_FIFO
perform_code2(AUDIO_ID,pout_audio_frame);
#else
perform_code(AUDIO_ID,pout_audio_frame);
#endif
}
else
{
pinframe->pts = pinframe->pkt_pts;
#ifdef AUDIO_FIFO
perform_code2(AUDIO_ID,pinframe);
#else
perform_code(AUDIO_ID,pinframe);
#endif
}
}
}
else
{
write_frame(ocodec,AUDIO_ID,pkt);
}
}
}
if (pinframe)
{
avcodec_free_frame(&pinframe);
pinframe = NULL;
}
if (pout_video_frame)
{
avcodec_free_frame(&pout_video_frame);
pout_video_frame = NULL;
}
uinit_pcm_resample(pout_audio_frame,swr_ctx);
return 1;
}
Result: (screenshot of the transcoded output omitted)