#include "muxer_ffmpeg.h"

#include "headers_ffmpeg.h"
#include "muxer_define.h"
#include "encoder_video.h"
#include "encoder_video_factory.h"
#include "record_desktop.h"
#include "sws_helper.h"
#include "encoder_aac.h"
#include "filter_amix.h"
#include "filter_aresample.h"
#include "record_audio.h"
#include "ring_buffer.h"
#include "error_define.h"
#include "log_helper.h"

namespace am {

muxer_ffmpeg::muxer_ffmpeg() {
    ffmpeg_register_all();
    ffmpeg_register_devices(); // register devices so network protocols such as RTSP are available

    _v_stream = NULL;
    _a_stream = NULL;

    _fmt = NULL;
    _fmt_ctx = NULL;

    _base_time = -1;
}

muxer_ffmpeg::~muxer_ffmpeg() {
    stop();
    cleanup();
}

/**
 * Initialize the muxer: allocate the output context, add the video stream
 * (when the chosen container supports video), add the audio stream (when the
 * container supports audio and at least one audio source is given), then open
 * the output and write the header.
 *
 * @param output_file      output path or streaming URL (rtmp://, rtsp://, file)
 * @param source_desktop   desktop capture source (video)
 * @param source_audios    array of audio capture sources, may be unused if nb==0
 * @param source_audios_nb number of entries in source_audios
 * @param setting          encoder/container settings
 * @return AE_NO on success, an AE_* error code otherwise (partially built
 *         state is torn down via cleanup() on failure)
 */
int muxer_ffmpeg::init(const char *output_file, record_desktop *source_desktop,
                       record_audio **source_audios, const int source_audios_nb,
                       const MUX_SETTING_T &setting) {
    int error = AE_NO;
    int ret = 0;

    do {
        al_info("start to initialize muxer ,output:%s ", output_file);

        error = alloc_oc(output_file, setting);
        if (error != AE_NO) break;

        if (_fmt->video_codec != AV_CODEC_ID_NONE) {
            error = add_video_stream(setting, source_desktop);
            if (error != AE_NO) break;
        }

        if (_fmt->audio_codec != AV_CODEC_ID_NONE && source_audios_nb) {
            error = add_audio_stream(setting, source_audios, source_audios_nb);
            if (error != AE_NO) break;
        }

        error = open_output(output_file, setting);
        if (error != AE_NO) break;

        av_dump_format(_fmt_ctx, 0, NULL, 1);

        _inited = true;
    } while (0);

    if (error != AE_NO) {
        cleanup();
        al_debug("muxer ffmpeg initialize failed:%s %d", err2str(error), ret);
    }

    return error;
}

/**
 * Start the pipeline. Order matters: encoders and filters are started before
 * the capture sources so no captured frame finds a stopped consumer.
 * Safe to call repeatedly; a second call while running is a no-op.
 */
int muxer_ffmpeg::start() {
    std::lock_guard lock(_mutex);

    int error = AE_NO;

    if (_running == true) {
        return AE_NO;
    }

    if (_inited == false) {
        return AE_NEED_INIT;
    }

    // common wall clock used to align audio and video timestamps
    _base_time = av_gettime_relative();

    if (_v_stream && _v_stream->v_enc)
        _v_stream->v_enc->start();

    if (_a_stream && _a_stream->a_enc)
        _a_stream->a_enc->start();

    // with 2+ sources audio goes through the amix filter; with a single
    // source each input has its own aresample filter instead
    if (_a_stream && _a_stream->a_nb >= 2 && _a_stream->a_filter_amix)
        _a_stream->a_filter_amix->start();

    if (_a_stream && _a_stream->a_nb < 2 && _a_stream->a_filter_aresample) {
        for (int i = 0; i < _a_stream->a_nb; i++) {
            _a_stream->a_filter_aresample[i]->start();
        }
    }

    if (_a_stream && _a_stream->a_src) {
        for (int i = 0; i < _a_stream->a_nb; i++) {
            if (_a_stream->a_src[i])
                _a_stream->a_src[i]->start();
        }
    }

    if (_v_stream && _v_stream->v_src)
        _v_stream->v_src->start();

    _running = true;

    return error;
}

/**
 * Stop the pipeline in the reverse order of start(): sources first, then
 * filters, then encoders, and finally write the container trailer.
 */
int muxer_ffmpeg::stop() {
    std::lock_guard lock(_mutex);

    if (_running == false)
        return AE_NO;

    _running = false;

    al_debug("try to stop muxer....");

    al_debug("stop audio recorder...");
    if (_a_stream && _a_stream->a_src) {
        for (int i = 0; i < _a_stream->a_nb; i++) {
            // mirror the per-element null check done in start()
            if (_a_stream->a_src[i])
                _a_stream->a_src[i]->stop();
        }
    }

    al_debug("stop video recorder...");
    if (_v_stream && _v_stream->v_src)
        _v_stream->v_src->stop();

    al_debug("stop audio amix filter...");
    if (_a_stream && _a_stream->a_filter_amix)
        _a_stream->a_filter_amix->stop();

    al_debug("stop audio aresampler filter...");
    if (_a_stream && _a_stream->a_filter_aresample) {
        for (int i = 0; i < _a_stream->a_nb; i++) {
            if (_a_stream->a_filter_aresample[i])
                _a_stream->a_filter_aresample[i]->stop();
        }
    }

    al_debug("stop video encoder...");
    if (_v_stream && _v_stream->v_enc)
        _v_stream->v_enc->stop();

    al_debug("stop audio encoder...");
    if (_a_stream) {
        if (_a_stream->a_enc)
            _a_stream->a_enc->stop();
    }

    al_debug("write file trailer...");
    if (_fmt_ctx)
        av_write_trailer(_fmt_ctx); //must write trailer ,otherwise file can not play

    al_debug("muxer stopped...");

    return AE_NO;
}

int muxer_ffmpeg::pause() {
    _paused = true;
    return 0;
}

int muxer_ffmpeg::resume() {
    _paused = false;
    return 0;
}

// Exposed getters for encoder instances
encoder_video *muxer_ffmpeg::get_video_encoder() const {
    return _v_stream ? _v_stream->v_enc : nullptr;
}

encoder_aac *muxer_ffmpeg::get_audio_encoder() const {
    return _a_stream ? _a_stream->a_enc : nullptr;
}

/**
 * Desktop capture callback: convert the captured frame to YUV420P and feed
 * it to the video encoder; optionally forward the YUV buffer to the preview
 * callback when preview is enabled.
 */
void muxer_ffmpeg::on_desktop_data(AVFrame *frame) {
    if (_running == false || _paused == true || !_v_stream ||
        !_v_stream->v_enc || !_v_stream->v_sws) {
        return;
    }

    int len = 0, ret = AE_NO;
    uint8_t *yuv_data = NULL;

    ret = _v_stream->v_sws->convert(frame, &yuv_data, &len);

    if (ret == AE_NO && yuv_data && len) {
        _v_stream->v_enc->put(yuv_data, len, frame);

        if (_on_yuv_data && _preview_enabled == true)
            _on_yuv_data(yuv_data, len, frame->width, frame->height, 0);
    }
}

void muxer_ffmpeg::on_desktop_error(int error) {
    al_fatal("on desktop capture error:%d", error);
}

/**
 * Rough dB level of an interleaved 32-bit float stereo PCM buffer.
 * Fixed vs the previous version: uses fabs() (integer abs() truncated the
 * float), uses an unsigned loop index, guards the tail read and the
 * division by zero for tiny/empty buffers.
 *
 * @param pcmdata interleaved PCM, assumed float32 stereo — TODO confirm at callers
 * @param size    buffer size in bytes
 */
int getPcmDB(const unsigned char *pcmdata, size_t size) {
    int db = 0;
    float value = 0;
    double sum = 0;
    double average = 0;

    const size_t bit_per_sample = 32;
    const size_t byte_per_sample = bit_per_sample / 8;
    const size_t channel_num = 2;
    const size_t stride = channel_num * byte_per_sample;

    if (size < stride)
        return 0; // not even one full sample group

    // only reads complete float samples; a ragged tail is ignored
    for (size_t i = 0; i + byte_per_sample <= size; i += stride) {
        memcpy(&value, pcmdata + i, byte_per_sample);
        sum += fabs(value);
    }

    average = sum / (double) (size / byte_per_sample / channel_num);
    if (average > 0) {
        db = (int) (20 * log10f(average));
    }

    al_debug("%d %f %f", db, average, sum);

    return db;
}

/**
 * dB level of the left channel of a planar-float (fltp) frame.
 * Returns -100 for silence.
 */
static int pcm_fltp_db_count(AVFrame *frame, int channels) {
    int ndb = 0;
    float value = 0.;

    float *ch_left = (float *) frame->data[0];
    //float *ch_right = (float *)frame->data[1];

    for (int i = 0; i < frame->nb_samples; i++) {
        value += fabs(ch_left[i]);
    }

    value = value / frame->nb_samples;

    if (0 != value) {
        ndb = (int) (20.0 * log10((value / 1.0)));
    } else
        ndb = -100;

    return ndb;
}

/**
 * Audio capture callback: route the frame either into the amix filter
 * (2+ sources) or into the per-source aresample filter.
 */
void muxer_ffmpeg::on_audio_data(AVFrame *frame, int index) {
    if (_running == false || _paused == true || !_a_stream)
        return;

    if (_a_stream->a_filter_amix != nullptr)
        _a_stream->a_filter_amix->add_frame(frame, index);
    else if (_a_stream->a_filter_aresample != nullptr &&
             _a_stream->a_filter_aresample[index] != nullptr) {
        _a_stream->a_filter_aresample[index]->add_frame(frame);
    }
}

void muxer_ffmpeg::on_audio_error(int error, int index) {
    al_fatal("on audio capture error:%d with stream index:%d", error, index);
}

/**
 * Shared body of the amix/aresample data callbacks: accumulate filtered PCM
 * into |resamples| and hand every complete buffer to the AAC encoder.
 * Packed formats are copied from data[0]; planar formats copy each channel
 * plane into its contiguous slice of the buffer.
 */
static void buffer_and_encode_pcm(AVFrame *frame, AUDIO_SAMPLE *resamples, encoder_aac *a_enc) {
    int copied_len = 0;
    const int sample_len = ffmpeg_get_buffer_size((AVSampleFormat) frame->format,
                                                  ffmpeg_get_frame_channels(frame),
                                                  frame->nb_samples, 1);
    int remain_len = sample_len;

    if (av_sample_fmt_is_planar((AVSampleFormat) frame->format) == 0) {
        // packed: all channels interleaved in data[0]
        while (remain_len > 0) {
            //cache pcm
            copied_len = min(resamples->size - resamples->sample_in, remain_len);
            if (copied_len) {
                memcpy(resamples->buff + resamples->sample_in,
                       frame->data[0] + sample_len - remain_len,
                       copied_len);
                resamples->sample_in += copied_len;
                remain_len = remain_len - copied_len;
            }

            //got enough pcm to encoder,resample and mix
            if (resamples->sample_in == resamples->size) {
                a_enc->put(resamples->buff, resamples->size, frame);
                resamples->sample_in = 0;
            }
        }
    } else {
        // planar: copy each channel plane into contiguous planes in buffer
        const int channels = ffmpeg_get_frame_channels(frame);
        while (remain_len > 0) {
            copied_len = min(resamples->size - resamples->sample_in, remain_len);
            if (copied_len) {
                const int copied_per_plane = copied_len / channels;
                const int written_per_plane = resamples->sample_in / channels;
                const int plane_size = resamples->size / channels;
                const int src_offset = (sample_len - remain_len) / channels;
                for (int ch = 0; ch < channels; ++ch) {
                    memcpy(resamples->buff + ch * plane_size + written_per_plane,
                           frame->data[ch] + src_offset,
                           copied_per_plane);
                }
                resamples->sample_in += copied_len;
                remain_len = remain_len - copied_len;
            }

            //got enough pcm to encoder,resample and mix
            if (resamples->sample_in == resamples->size) {
                a_enc->put(resamples->buff, resamples->size, frame);
                resamples->sample_in = 0;
            }
        }
    }
}

void muxer_ffmpeg::on_filter_amix_data(AVFrame *frame, int) {
    if (_running == false || !_a_stream || !_a_stream->a_enc)
        return;

    buffer_and_encode_pcm(frame, _a_stream->a_resamples[0], _a_stream->a_enc);
}

void muxer_ffmpeg::on_filter_amix_error(int error, int) {
    al_fatal("on filter amix audio error:%d", error);
}

void muxer_ffmpeg::on_filter_aresample_data(AVFrame *frame, int index) {
    if (_running == false || !_a_stream || !_a_stream->a_enc)
        return;

    buffer_and_encode_pcm(frame, _a_stream->a_resamples[index], _a_stream->a_enc);
}

void muxer_ffmpeg::on_filter_aresample_error(int error, int index) {
    al_fatal("on filter aresample[%d] audio error:%d", index, error);
}

void muxer_ffmpeg::on_enc_264_data(AVPacket *packet) {
    if (_running && _v_stream) {
        write_video(packet);
    }
}

void muxer_ffmpeg::on_enc_264_error(int error) {
    al_fatal("on desktop encode error:%d", error);
}

void muxer_ffmpeg::on_enc_aac_data(AVPacket *packet) {
    if (_running && _a_stream) {
        write_audio(packet);
    }
}

void muxer_ffmpeg::on_enc_aac_error(int error) {
    al_fatal("on audio encode error:%d", error);
}

/**
 * Allocate the output format context, choosing the container from the URL
 * scheme: rtmp(s):// -> flv, rtsp:// -> rtsp, otherwise let FFmpeg guess
 * from the file extension.
 */
int muxer_ffmpeg::alloc_oc(const char *output_file, const MUX_SETTING_T &setting) {
    _output_file = std::string(output_file);

    int error = AE_NO;
    int ret = 0;

    do {
        // pick the output format from the URL protocol
        const char *format_name = NULL;
        std::string url_str(output_file);
        if (url_str.find("rtmp://") == 0 || url_str.find("rtmps://") == 0) {
            format_name = "flv";
        } else if (url_str.find("rtsp://") == 0) {
            // RTSP push uses the RTSP muxer; FFmpeg manages the RTP session
            format_name = "rtsp";
            al_debug("RTSP URL detected, using RTSP format");
        }

        ret = avformat_alloc_output_context2(&_fmt_ctx, NULL, format_name, output_file);
        if (ret < 0 || !_fmt_ctx) {
            al_debug("avformat_alloc_output_context2 failed with ret=%d, format=%s, url=%s",
                     ret, format_name ? format_name : "auto", output_file);
            error = AE_FFMPEG_ALLOC_CONTEXT_FAILED;
            break;
        }

        _fmt = _fmt_ctx->oformat;
    } while (0);

    return error;
}

/**
 * Create the video encoder, scaler, and the container video stream, and wire
 * the capture/encoder callbacks.
 */
int muxer_ffmpeg::add_video_stream(const MUX_SETTING_T &setting, record_desktop *source_desktop) {
    int error = AE_NO;
    int ret = 0;

    _v_stream = new MUX_STREAM();
    memset(_v_stream, 0, sizeof(MUX_STREAM));

    _v_stream->v_src = source_desktop;
    _v_stream->pre_pts = -1;

    _v_stream->v_src->registe_cb(
        std::bind(&muxer_ffmpeg::on_desktop_data, this, std::placeholders::_1),
        std::bind(&muxer_ffmpeg::on_desktop_error, this, std::placeholders::_1));

    RECORD_DESKTOP_RECT v_rect = _v_stream->v_src->get_rect();

    do {
        error = encoder_video_new(setting.v_encoder_id, &_v_stream->v_enc);
        if (error != AE_NO) break;

        error = _v_stream->v_enc->init(setting.v_out_width, setting.v_out_height,
                                       setting.v_frame_rate, setting.v_bit_rate, setting.v_qb);
        if (error != AE_NO) break;

        _v_stream->v_enc->registe_cb(
            std::bind(&muxer_ffmpeg::on_enc_264_data, this, std::placeholders::_1),
            std::bind(&muxer_ffmpeg::on_enc_264_error, this, std::placeholders::_1));

        _v_stream->v_sws = new sws_helper();
        error = _v_stream->v_sws->init(_v_stream->v_src->get_pixel_fmt(),
                                       v_rect.right - v_rect.left,
                                       v_rect.bottom - v_rect.top,
                                       AV_PIX_FMT_YUV420P,
                                       setting.v_out_width, setting.v_out_height);
        if (error != AE_NO) break;

        const AVCodec *codec = avcodec_find_encoder(_v_stream->v_enc->get_codec_id());
        if (!codec) {
            error = AE_FFMPEG_FIND_ENCODER_FAILED;
            break;
        }

        // FFmpeg 7 compatibility: AVOutputFormat's video_codec field is no
        // longer writable; encoder parameters go through the AVStream codecpar
        AVStream *st = avformat_new_stream(_fmt_ctx, codec);
        if (!st) {
            error = AE_FFMPEG_NEW_STREAM_FAILED;
            break;
        }

        ffmpeg_set_stream_codec_id(st, _v_stream->v_enc->get_codec_id());
        ffmpeg_set_stream_bit_rate(st, setting.v_bit_rate);
        ffmpeg_set_stream_codec_type(st, AVMEDIA_TYPE_VIDEO);

        // use the configured frame rate as the time base (1/fps) to avoid
        // bogus frame-rate reporting
        st->time_base = {1, setting.v_frame_rate};
        ffmpeg_set_stream_pix_fmt(st, AV_PIX_FMT_YUV420P);
        ffmpeg_set_stream_dimensions(st, setting.v_out_width, setting.v_out_height);

        // average frame rate is fps/1
        st->avg_frame_rate = {setting.v_frame_rate, 1};

        // always attach extradata (SPS/PPS etc.); RTSP/SDP needs it
        {
            uint8_t *extradata = (uint8_t *) av_memdup(_v_stream->v_enc->get_extradata(),
                                                       _v_stream->v_enc->get_extradata_size());
            ffmpeg_set_stream_extradata(st, extradata, _v_stream->v_enc->get_extradata_size());
        }

        _v_stream->st = st;
        _v_stream->setting = setting;
        //_v_stream->filter = av_bitstream_filter_init("h264_mp4toannexb");
    } while (0);

    return error;
}

/**
 * Create the AAC encoder, per-source resample filters (and the amix filter
 * when there are 2+ sources), the sample buffers, and the container audio
 * stream; wires all audio callbacks.
 */
int muxer_ffmpeg::add_audio_stream(const MUX_SETTING_T &setting,
                                   record_audio **source_audios, const int source_audios_nb) {
    int error = AE_NO;
    int ret = 0;

    _a_stream = new MUX_STREAM();
    memset(_a_stream, 0, sizeof(MUX_STREAM));

    _a_stream->a_nb = source_audios_nb;
    _a_stream->a_filter_aresample = new filter_aresample *[_a_stream->a_nb];
    _a_stream->a_resamples = new AUDIO_SAMPLE *[_a_stream->a_nb];
    _a_stream->a_samples = new AUDIO_SAMPLE *[_a_stream->a_nb];
    _a_stream->a_src = new record_audio *[_a_stream->a_nb];
    _a_stream->pre_pts = -1;

    do {
        _a_stream->a_enc = new encoder_aac();
        error = _a_stream->a_enc->init(setting.a_nb_channel, setting.a_sample_rate,
                                       setting.a_sample_fmt, setting.a_bit_rate);
        if (error != AE_NO) break;

        _a_stream->a_enc->registe_cb(
            std::bind(&muxer_ffmpeg::on_enc_aac_data, this, std::placeholders::_1),
            std::bind(&muxer_ffmpeg::on_enc_aac_error, this, std::placeholders::_1));

        for (int i = 0; i < _a_stream->a_nb; i++) {
            _a_stream->a_src[i] = source_audios[i];
            _a_stream->a_src[i]->registe_cb(
                std::bind(&muxer_ffmpeg::on_audio_data, this,
                          std::placeholders::_1, std::placeholders::_2),
                std::bind(&muxer_ffmpeg::on_audio_error, this,
                          std::placeholders::_1, std::placeholders::_2),
                i);

            _a_stream->a_filter_aresample[i] = new filter_aresample();
            _a_stream->a_resamples[i] = new AUDIO_SAMPLE({NULL, 0, 0});

            FILTER_CTX ctx_in = {0}, ctx_out = {0};
            ctx_in.time_base = _a_stream->a_src[i]->get_time_base();
            ctx_in.channel_layout = ffmpeg_get_default_channel_layout(
                _a_stream->a_src[i]->get_channel_num());
            ctx_in.nb_channel = _a_stream->a_src[i]->get_channel_num();
            ctx_in.sample_fmt = _a_stream->a_src[i]->get_fmt();
            ctx_in.sample_rate = _a_stream->a_src[i]->get_sample_rate();

            ctx_out.time_base = {1, AV_TIME_BASE};
            ctx_out.channel_layout = ffmpeg_get_default_channel_layout(setting.a_nb_channel);
            ctx_out.nb_channel = setting.a_nb_channel;
            ctx_out.sample_fmt = setting.a_sample_fmt;
            ctx_out.sample_rate = setting.a_sample_rate;

            _a_stream->a_filter_aresample[i]->init(ctx_in, ctx_out, i);
            _a_stream->a_filter_aresample[i]->registe_cb(
                std::bind(&muxer_ffmpeg::on_filter_aresample_data, this,
                          std::placeholders::_1, std::placeholders::_2),
                std::bind(&muxer_ffmpeg::on_filter_aresample_error, this,
                          std::placeholders::_1, std::placeholders::_2));

            _a_stream->a_resamples[i]->size = av_samples_get_buffer_size(
                NULL, setting.a_nb_channel, _a_stream->a_enc->get_nb_samples(),
                setting.a_sample_fmt, 1);
            _a_stream->a_resamples[i]->buff = new uint8_t[_a_stream->a_resamples[i]->size];

            _a_stream->a_samples[i] = new AUDIO_SAMPLE({NULL, 0, 0});
            _a_stream->a_samples[i]->size = av_samples_get_buffer_size(
                NULL, _a_stream->a_src[i]->get_channel_num(),
                _a_stream->a_enc->get_nb_samples(),
                _a_stream->a_src[i]->get_fmt(), 1);
            _a_stream->a_samples[i]->buff = new uint8_t[_a_stream->a_samples[i]->size];
        }

        if (_a_stream->a_nb >= 2) {
            _a_stream->a_filter_amix = new am::filter_amix();
            error = _a_stream->a_filter_amix->init(
                {NULL, NULL,
                 _a_stream->a_src[0]->get_time_base(),
                 _a_stream->a_src[0]->get_sample_rate(),
                 _a_stream->a_src[0]->get_fmt(),
                 _a_stream->a_src[0]->get_channel_num(),
                 (int64_t) ffmpeg_get_default_channel_layout(
                     _a_stream->a_src[0]->get_channel_num())},
                {NULL, NULL,
                 _a_stream->a_src[1]->get_time_base(),
                 _a_stream->a_src[1]->get_sample_rate(),
                 _a_stream->a_src[1]->get_fmt(),
                 _a_stream->a_src[1]->get_channel_num(),
                 (int64_t) ffmpeg_get_default_channel_layout(
                     _a_stream->a_src[1]->get_channel_num())},
                {NULL, NULL,
                 {1, AV_TIME_BASE},
                 setting.a_sample_rate,
                 setting.a_sample_fmt,
                 setting.a_nb_channel,
                 (int64_t) ffmpeg_get_default_channel_layout(setting.a_nb_channel)});
            if (error != AE_NO) {
                break;
            }

            _a_stream->a_filter_amix->registe_cb(
                std::bind(&muxer_ffmpeg::on_filter_amix_data, this,
                          std::placeholders::_1, std::placeholders::_2),
                std::bind(&muxer_ffmpeg::on_filter_amix_error, this,
                          std::placeholders::_1, std::placeholders::_2));
        }

        const AVCodec *codec = avcodec_find_encoder(_a_stream->a_enc->get_codec_id());
        if (!codec) {
            error = AE_FFMPEG_FIND_ENCODER_FAILED;
            break;
        }

        // FFmpeg 7 compatibility: AVOutputFormat codec fields are read-only
        // in newer versions; encoder parameters go through the AVStream
        AVCodecID audio_codec_id = _a_stream->a_enc->get_codec_id();

        AVStream *st = avformat_new_stream(_fmt_ctx, codec);
        if (!st) {
            error = AE_FFMPEG_NEW_STREAM_FAILED;
            break;
        }

        av_dict_set(&st->metadata, "title", "Track1", 0);

        // configure the audio stream codec parameters
        ffmpeg_set_stream_codec_id(st, audio_codec_id);
        ffmpeg_set_stream_codec_type(st, AVMEDIA_TYPE_AUDIO);
        ffmpeg_set_stream_bit_rate(st, setting.a_bit_rate);

        // sample rate / layout go into codecpar on FFmpeg >= 4
#if FFMPEG_VERSION_MAJOR >= 7
        st->codecpar->sample_rate = setting.a_sample_rate;
        av_channel_layout_default(&st->codecpar->ch_layout, setting.a_nb_channel);
        st->codecpar->format = setting.a_sample_fmt;
#elif FFMPEG_VERSION_MAJOR >= 4
        st->codecpar->sample_rate = setting.a_sample_rate;
        st->codecpar->channels = setting.a_nb_channel;
        st->codecpar->channel_layout = ffmpeg_get_default_channel_layout(setting.a_nb_channel);
        st->codecpar->format = setting.a_sample_fmt;
#else
        st->codec->sample_rate = setting.a_sample_rate;
        st->codec->channels = setting.a_nb_channel;
        st->codec->channel_layout = ffmpeg_get_default_channel_layout(setting.a_nb_channel);
        st->codec->sample_fmt = setting.a_sample_fmt;
#endif

        st->time_base = {1, setting.a_sample_rate};

        AVCodecContext *codec_ctx = ffmpeg_get_codec_context(st);
        codec_ctx->bit_rate = setting.a_bit_rate;
        ffmpeg_set_codec_channels(codec_ctx, setting.a_nb_channel);
        codec_ctx->sample_rate = setting.a_sample_rate;
        codec_ctx->sample_fmt = setting.a_sample_fmt;
        codec_ctx->time_base = {1, setting.a_sample_rate};
        ffmpeg_set_codec_channel_layout(codec_ctx,
                                        ffmpeg_get_default_channel_layout(setting.a_nb_channel));

        // RTMP push (FLV container) keeps ADTS framing; other containers use
        // a global header
        bool isRtmpStream = (_fmt_ctx->oformat && strcmp(_fmt_ctx->oformat->name, "flv") == 0);

        if (_fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
            if (isRtmpStream) {
                al_debug("RTMP stream detected, not setting GLOBAL_HEADER for AAC");
            } else {
                codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
                codec_ctx->extradata_size = _a_stream->a_enc->get_extradata_size();
                codec_ctx->extradata = (uint8_t *) av_memdup(_a_stream->a_enc->get_extradata(),
                                                             _a_stream->a_enc->get_extradata_size());
                al_debug("Non-RTMP stream, setting GLOBAL_HEADER for AAC");
            }
        }

        // always attach the AudioSpecificConfig extradata; RTSP/SDP/MP4/FLV
        // containers require it
        if (_a_stream->a_enc->get_extradata_size() > 0) {
            uint8_t *asc = (uint8_t *) av_memdup(_a_stream->a_enc->get_extradata(),
                                                 _a_stream->a_enc->get_extradata_size());
            ffmpeg_set_stream_extradata(st, asc, _a_stream->a_enc->get_extradata_size());
            al_debug("Set AAC extradata on stream: size=%d", _a_stream->a_enc->get_extradata_size());
        } else {
            al_warn("AAC extradata size is 0; some outputs (e.g., RTSP/MP4) may fail in avformat_write_header");
        }

        _a_stream->st = st;
        _a_stream->setting = setting;

        // AAC bitstream filter disabled: the encoder emits raw AAC (no ADTS)
        // and the container handles framing
        _a_stream->filter = nullptr;
        al_debug("AAC bitstream filter disabled; container will handle framing based on extradata");
    } while (0);

    return error;
}

/**
 * Open the output IO (when the container needs a file) and write the header,
 * with low-latency options for RTSP/RTMP push URLs.
 */
int muxer_ffmpeg::open_output(const char *output_file, const MUX_SETTING_T &setting) {
    int error = AE_NO;
    int ret = 0;

    do {
        if (!(_fmt->flags & AVFMT_NOFILE)) {
            ret = avio_open(&_fmt_ctx->pb, output_file, AVIO_FLAG_WRITE);
            if (ret < 0) {
                error = AE_FFMPEG_OPEN_IO_FAILED;
                break;
            }
        }

        AVDictionary *opt = NULL;

        std::string url_str(output_file);
        if (url_str.find("rtsp://") == 0) {
            // ultra-low-latency options for RTSP push
            av_dict_set(&opt, "rtsp_transport", "tcp", 0);   // TCP transport, more stable
            av_dict_set(&opt, "muxdelay", "0", 0);           // minimal mux delay
            av_dict_set(&opt, "fflags", "+genpts+flush_packets+nobuffer", 0); // generate PTS, flush immediately, no buffering
            av_dict_set(&opt, "max_delay", "0", 0);
            av_dict_set(&opt, "tune", "zerolatency", 0);
            av_dict_set(&opt, "buffer_size", "512000", 0);   // shrink buffer to 512KB
            av_dict_set(&opt, "max_interleave_delta", "0", 0);
            al_debug("RTSP output detected, setting ultra-low-latency parameters");
        } else if (url_str.find("rtmp://") == 0) {
            // ultra-low-latency options for RTMP push
            av_dict_set(&opt, "muxdelay", "0", 0);
            av_dict_set(&opt, "fflags", "+genpts+flush_packets+nobuffer", 0);
            av_dict_set(&opt, "max_delay", "0", 0);
            av_dict_set(&opt, "buffer_size", "512000", 0);   // shrink buffer to 512KB
            av_dict_set(&opt, "max_interleave_delta", "0", 0);
            al_debug("RTMP output detected, setting ultra-low-latency parameters");
        } else if (_v_stream) {
            // file output: keep the historical track timescale option.
            // Guarded on _v_stream — audio-only outputs previously crashed here.
            av_dict_set_int(&opt, "video_track_timescale",
                            _v_stream->setting.v_frame_rate, 0);
        }

        ret = avformat_write_header(_fmt_ctx, &opt);

        av_dict_free(&opt);

        if (ret < 0) {
            al_debug("avformat_write_header failed with ret=%d, error=%s", ret, av_err2str(ret));
            error = AE_FFMPEG_WRITE_HEADER_FAILED;
            break;
        }
    } while (0);

    return error;
}

void muxer_ffmpeg::cleanup_video() {
    if (!_v_stream)
        return;

    if (_v_stream->v_enc)
        delete _v_stream->v_enc;

    if (_v_stream->v_sws)
        delete _v_stream->v_sws;

    delete _v_stream;
    _v_stream = nullptr;
}

void muxer_ffmpeg::cleanup_audio() {
    if (!_a_stream)
        return;

    if (_a_stream->a_enc)
        delete _a_stream->a_enc;

    if (_a_stream->a_filter_amix)
        delete _a_stream->a_filter_amix;

    // release the bitstream filter if one was created
    if (_a_stream->filter) {
        ffmpeg_bitstream_filter_close(_a_stream->filter);
        _a_stream->filter = nullptr;
    }

    if (_a_stream->a_nb) {
        for (int i = 0; i < _a_stream->a_nb; i++) {
            if (_a_stream->a_filter_aresample && _a_stream->a_filter_aresample[i])
                delete _a_stream->a_filter_aresample[i];

            if (_a_stream->a_samples && _a_stream->a_samples[i]) {
                delete[] _a_stream->a_samples[i]->buff;
                delete _a_stream->a_samples[i];
            }

            if (_a_stream->a_resamples && _a_stream->a_resamples[i]) {
                delete[] _a_stream->a_resamples[i]->buff;
                delete _a_stream->a_resamples[i];
            }
        }

        if (_a_stream->a_filter_aresample)
            delete[] _a_stream->a_filter_aresample;

        if (_a_stream->a_samples)
            delete[] _a_stream->a_samples;

        if (_a_stream->a_resamples)
            delete[] _a_stream->a_resamples;
    }

    delete _a_stream;
    _a_stream = nullptr;
}

void muxer_ffmpeg::cleanup() {
    cleanup_video();
    cleanup_audio();

    if (_fmt_ctx && _fmt && !(_fmt->flags & AVFMT_NOFILE))
        avio_closep(&_fmt_ctx->pb);

    if (_fmt_ctx) {
        avformat_free_context(_fmt_ctx);
    }

    _fmt_ctx = NULL;
    _fmt = NULL;

    _inited = false;
}

uint64_t muxer_ffmpeg::get_current_time() {
    std::lock_guard lock(_time_mutex);

    return av_gettime_relative();
}

int muxer_ffmpeg::write_video(AVPacket *packet) {
    //must lock here,coz av_interleaved_write_frame will push packet into a queue,and is not thread safe
    std::lock_guard lock(_mutex);

    packet->stream_index = _v_stream->st->index;

    // scale ts with timebase of base_time
    av_packet_rescale_ts(packet, _v_stream->v_enc->get_time_base(), {1, AV_TIME_BASE});

    // make audio and video use one clock
    packet->pts = packet->pts - _base_time;
    packet->dts = packet->pts; //make sure that dts is equal to pts

    av_packet_rescale_ts(packet, {1, AV_TIME_BASE}, _v_stream->st->time_base);

    al_debug("V:%lld", packet->pts);

    av_assert0(packet->data != NULL);

    int ret = av_interleaved_write_frame(_fmt_ctx, packet); //no need to unref packet,this will be auto unref
    if (ret != 0) {
        al_fatal("write video frame error:%d", ret);
    }

    return ret;
}

int muxer_ffmpeg::write_audio(AVPacket *packet) {
    //must lock here,coz av_interleaved_write_frame will push packet into a queue,and is not thread safe
    std::lock_guard lock(_mutex);

    packet->stream_index = _a_stream->st->index;

    if (packet->pts == AV_NOPTS_VALUE) {
        packet->pts = av_gettime_relative();
    }

    // scale ts with timebase of base_time
    av_packet_rescale_ts(packet, _a_stream->a_enc->get_time_base(), {1, AV_TIME_BASE});

    // make audio and video use one clock
    packet->pts = packet->pts - _base_time;
    packet->dts = packet->pts; //make sure that dts is equal to pts

    av_packet_rescale_ts(packet, {1, AV_TIME_BASE}, _a_stream->st->time_base);

    al_debug("A:%lld %lld", packet->pts, packet->dts);

    av_assert0(packet->data != NULL);

    int ret = av_interleaved_write_frame(_fmt_ctx, packet); //no need to unref packet,this will be auto unref
    if (ret != 0) {
        al_fatal("write audio frame error:%d", ret);
    }

    return ret;
}

} // namespace am