
Qt+FFmpeg Audio Recording

2019-03-04 22:52:47



ScreenRecordImpl.h

#pragma once
#include <Windows.h>
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <QObject>
#include <QString>
#include <QMutex>

#ifdef	__cplusplus
extern "C"
{
#endif
struct AVFormatContext;
struct AVCodecContext;
struct AVCodec;
struct AVFifoBuffer;
struct AVAudioFifo;
struct AVFrame;
struct SwsContext;
struct SwrContext;
#ifdef __cplusplus
};
#endif

//Main thread: the Qt GUI thread
//Parent thread: RecordAudioThreadProc (encoding)
//Child thread: AcquireSoundThreadProc (capture)
//Testing shows that audio encoding is faster than capture, the opposite of video
//So the parent (encoding) thread may exit before the child (capture) thread
//If that happens, join the child thread before the parent releases its resources

class ScreenRecordImpl : public QObject
{
	Q_OBJECT
private:
	enum RecordState {
		NotStarted,
		Started,
		Paused,
		Stopped,
		Unknown,
	};
public:
	ScreenRecordImpl(QObject * parent = Q_NULLPTR);
	void Init(const QVariantMap& map);

private slots:
	void Start();
	void Pause();
	void Stop();

private:
	//Read audio frames from the fifo buffer, encode them, and write the packets to the output file
	void RecordAudioThreadProc();
	//Read frames from the audio input stream and write them into the fifo buffer
	void AcquireSoundThreadProc();
	int OpenAudio();
	int OpenOutput();
	QString GetSpeakerDeviceName();
	QString GetMicrophoneDeviceName();
	AVFrame* AllocAudioFrame(AVCodecContext* c, int nbSamples);
	//Drain the remaining frames from the encoder and write them to the output stream
	void FlushEncoder();
	void Release();

private:
	QString						m_filePath;
	int							m_bitrate;
	int							m_aIndex;	//input audio stream index
	int							m_aOutIndex;//output audio stream index
	AVFormatContext				*m_aFmtCtx;
	AVFormatContext				*m_oFmtCtx;
	AVCodecContext				*m_aDecodeCtx;
	AVCodecContext				*m_aEncodeCtx;
	SwrContext					*m_swrCtx;
	AVAudioFifo					*m_aFifoBuf;

	std::atomic_bool			m_stop;

	//Number of samples per channel in a frame
	//For now, assume every frame from the input stream has the same nb_samples
	int							m_nbSamples;
	std::mutex					m_mtx;
	std::condition_variable		m_cvNotEmpty;	//encoding thread waits on this while the fifo buffer is empty
	std::condition_variable		m_cvNotFull;	//capture thread waits on this while the fifo buffer is full
	std::mutex					m_mtxPause;
	std::condition_variable		m_cvNotPause;	//capture thread waits on this while recording is paused
	RecordState					m_state;
};
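
The class above is a producer-consumer design: AcquireSoundThreadProc (the producer) pushes resampled samples into the AVAudioFifo, while RecordAudioThreadProc (the consumer) pulls fixed-size frames out and encodes them. The following stand-alone sketch only illustrates the mutex / two-condition-variable handshake used between the two threads; the queue and item types are simplified placeholders, not part of the recorder.

#include <condition_variable>
#include <mutex>
#include <queue>

std::queue<int> fifo;                 //stands in for the AVAudioFifo
const size_t kCapacity = 30;          //stands in for 30 * m_nbSamples
std::mutex mtx;
std::condition_variable cvNotEmpty;   //consumer waits on this while the fifo is empty
std::condition_variable cvNotFull;    //producer waits on this while the fifo is full

void Produce(int sample)              //called by the capture thread
{
	std::unique_lock<std::mutex> lk(mtx);
	cvNotFull.wait(lk, [] { return fifo.size() < kCapacity; });
	fifo.push(sample);
	cvNotEmpty.notify_one();          //wake the encoder if it is waiting for data
}

int Consume()                         //called by the encoding thread
{
	std::unique_lock<std::mutex> lk(mtx);
	cvNotEmpty.wait(lk, [] { return !fifo.empty(); });
	int sample = fifo.front();
	fifo.pop();
	cvNotFull.notify_one();           //wake the capture thread if the fifo was full
	return sample;
}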

ScreenRecordImpl.cpp

#ifdef	__cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/imgutils.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include <libavutil/avassert.h>
#ifdef __cplusplus
};
#endif

#include "ScreenRecordImpl.h"
#include <QDebug>
#include <QAudioDeviceInfo>
#include <thread>
#include <fstream>

#include <dshow.h>

using namespace std;

//g_collectFrameCnt equal to g_encodeFrameCnt means every captured frame was encoded
int g_collectFrameCnt = 0;	//number of captured frames
int g_encodeFrameCnt = 0;	//number of encoded frames

ScreenRecordImpl::ScreenRecordImpl(QObject * parent) :
	QObject(parent)
	, m_aIndex(-1), m_aOutIndex(-1)
	, m_aFmtCtx(nullptr), m_oFmtCtx(nullptr)
	, m_aDecodeCtx(nullptr)
	, m_aEncodeCtx(nullptr)
	, m_swrCtx(nullptr)
	, m_aFifoBuf(nullptr)
	, m_stop(false)
	, m_nbSamples(0)
	, m_state(RecordState::NotStarted)
{
}

void ScreenRecordImpl::Init(const QVariantMap& map)
{
	m_filePath = map["filePath"].toString();
	m_bitrate = map["bit_rate"].toInt();
}

void ScreenRecordImpl::Start()
{
	if (m_state == RecordState::NotStarted)
	{
		qDebug() << "start record";
		m_state = RecordState::Started;
		std::thread recordThread(&ScreenRecordImpl::RecordAudioThreadProc, this);
		recordThread.detach();
	}
	else if (m_state == RecordState::Paused)
	{
		qDebug() << "continue record";
		m_state = RecordState::Started;
		m_cvNotPause.notify_one();
	}
}

void ScreenRecordImpl::Pause()
{
	qDebug() << "pause record";
	m_state = RecordState::Paused;
}

void ScreenRecordImpl::Stop()
{
	qDebug() << "stop record";
	RecordState state = m_state;
	m_state = RecordState::Stopped;
	if (state == RecordState::Paused)
		m_cvNotPause.notify_one();
}

static char *dup_wchar_to_utf8(wchar_t *w)
{
	char *s = NULL;
	int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
	s = (char *)av_malloc(l);
	if (s)
		WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
	return s;
}

static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
{
	const enum AVSampleFormat *p = codec->sample_fmts;

	while (*p != AV_SAMPLE_FMT_NONE) {
		if (*p == sample_fmt)
			return 1;
		p++;
	}
	return 0;
}

int ScreenRecordImpl::OpenAudio()
{
	int ret = -1;
	AVCodec *decoder = nullptr;

	qDebug() << GetSpeakerDeviceName();
	qDebug() << GetMicrophoneDeviceName();
	QString audioDeviceName = "audio=" + GetMicrophoneDeviceName();
	//AVDictionary* options = nullptr;
	//av_dict_set(&options, "list_devices", "true", 0);
	//AVInputFormat *iformat = av_find_input_format("dshow");
	//qDebug() << "Device Info=============";
	//avformat_open_input(&m_aFmtCtx, "audio=dummy", iformat, &options);
	//qDebug() << "========================";

	//Find the dshow input format
	AVInputFormat *ifmt = av_find_input_format("dshow");
	//char * deviceName = dup_wchar_to_utf8(L"audio=麦克风 (Conexant SmartAudio HD)"); 
	//char * deviceName = dup_wchar_to_utf8(L"audio=麦克风 (High Definition Audio 设备)");
	if (avformat_open_input(&m_aFmtCtx, audioDeviceName.toStdString().c_str(), ifmt, nullptr) < 0)
	{
		qDebug() << "Can not open audio input stream";
		return -1;
	}
	if (avformat_find_stream_info(m_aFmtCtx, nullptr) < 0)
		return -1;

	for (int i = 0; i < m_aFmtCtx->nb_streams; ++i)
	{
		AVStream * stream = m_aFmtCtx->streams[i];
		if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			decoder = avcodec_find_decoder(stream->codecpar->codec_id);
			if (decoder == nullptr)
			{
				printf("Codec not found.\n");
				return -1;
			}
			//Copy parameters from the audio stream into the codec context
			m_aDecodeCtx = avcodec_alloc_context3(decoder);
			if ((ret = avcodec_parameters_to_context(m_aDecodeCtx, stream->codecpar)) < 0)
			{
				qDebug() << "Audio avcodec_parameters_to_context failed,error code: " << ret;
				return -1;
			}
			m_aIndex = i;
			break;
		}
	}
	if (0 > avcodec_open2(m_aDecodeCtx, decoder, NULL))
	{
		printf("can not find or open audio decoder!\n");
		return -1;
	}
	return 0;
}

int ScreenRecordImpl::OpenOutput()
{
	int ret = -1;
	AVStream *vStream = nullptr, *aStream = nullptr;
	string filePath = m_filePath.toStdString();
	ret = avformat_alloc_output_context2(&m_oFmtCtx, nullptr, nullptr, filePath.c_str());
	if (ret < 0)
	{
		qDebug() << "avformat_alloc_output_context2 failed";
		return -1;
	}
	if (m_aFmtCtx->streams[m_aIndex]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
	{
		aStream = avformat_new_stream(m_oFmtCtx, NULL);
		if (!aStream)
		{
			printf("can not new audio stream for output!\n");
			return -1;
		}
		m_aOutIndex = aStream->index;

		AVCodec *encoder = avcodec_find_encoder(m_oFmtCtx->oformat->audio_codec);
		if (!encoder)
		{
			qDebug() << "Can not find audio encoder, id: " << m_oFmtCtx->oformat->audio_codec;
			return -1;
		}
		m_aEncodeCtx = avcodec_alloc_context3(encoder);
		if (nullptr == m_aEncodeCtx)
		{
			qDebug() << "audio avcodec_alloc_context3 failed";
			return -1;
		}
		//ret = avcodec_parameters_to_context(m_aEncodeCtx, m_aFmtCtx->streams[m_aIndex]->codecpar);
		//if (ret < 0)
		//{
		//	qDebug() << "Output audio avcodec_parameters_to_context,error code:" << ret;
		//	return -1;
		//}
		m_aEncodeCtx->sample_fmt = encoder->sample_fmts ? encoder->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
		m_aEncodeCtx->bit_rate = m_bitrate;
		m_aEncodeCtx->sample_rate = 44100;
		if (encoder->supported_samplerates) 
		{
			m_aEncodeCtx->sample_rate = encoder->supported_samplerates[0];
			for (int i = 0; encoder->supported_samplerates[i]; ++i)
			{
				if (encoder->supported_samplerates[i] == 44100)
					m_aEncodeCtx->sample_rate = 44100;
			}
		}
		m_aEncodeCtx->channel_layout = AV_CH_LAYOUT_STEREO;
		if (encoder->channel_layouts) 
		{
			m_aEncodeCtx->channel_layout = encoder->channel_layouts[0];
			for (int i = 0; encoder->channel_layouts[i]; ++i) 
			{
				if (encoder->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
					m_aEncodeCtx->channel_layout = AV_CH_LAYOUT_STEREO;
			}
		}
		m_aEncodeCtx->channels = av_get_channel_layout_nb_channels(m_aEncodeCtx->channel_layout);
		m_aEncodeCtx->time_base = AVRational{ 1, m_aEncodeCtx->sample_rate };
		aStream->time_base = AVRational{ 1, m_aEncodeCtx->sample_rate };

		m_aEncodeCtx->codec_tag = 0;
		m_aEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

		if (!check_sample_fmt(encoder, m_aEncodeCtx->sample_fmt)) 
		{
			qDebug() << "Encoder does not support sample format " << av_get_sample_fmt_name(m_aEncodeCtx->sample_fmt);
			return -1;
		}
		//Open the audio encoder; frame_size becomes available after this call
		ret = avcodec_open2(m_aEncodeCtx, encoder, 0);
		if (ret < 0)
		{
			qDebug() << "Can not open the audio encoder, id: " << encoder->id << "error code: " << ret;
			return -1;
		}
		//Copy the encoder context parameters to the output audio stream
		ret = avcodec_parameters_from_context(aStream->codecpar, m_aEncodeCtx);
		if (ret < 0)
		{
			qDebug() << "Output audio avcodec_parameters_from_context,error code:" << ret;
			return -1;
		}
		m_swrCtx = swr_alloc();
		if (!m_swrCtx)
		{
			qDebug() << "swr_alloc failed";
			return -1;
		}
		av_opt_set_int(m_swrCtx, "in_channel_count", m_aDecodeCtx->channels, 0);	//2
		av_opt_set_int(m_swrCtx, "in_sample_rate", m_aDecodeCtx->sample_rate, 0);	//44100
		av_opt_set_sample_fmt(m_swrCtx, "in_sample_fmt", m_aDecodeCtx->sample_fmt, 0);	//AV_SAMPLE_FMT_S16
		av_opt_set_int(m_swrCtx, "out_channel_count", m_aEncodeCtx->channels, 0);	//2
		av_opt_set_int(m_swrCtx, "out_sample_rate", m_aEncodeCtx->sample_rate, 0);	//44100
		av_opt_set_sample_fmt(m_swrCtx, "out_sample_fmt", m_aEncodeCtx->sample_fmt, 0);	//AV_SAMPLE_FMT_FLTP
		if ((ret = swr_init(m_swrCtx)) < 0) 
		{
			qDebug() << "swr_init failed";
			return -1;
		}
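		//Note (not in the original post): with the same libswresample API generation used here,
		//the six av_opt_set_* calls above can be collapsed into a single swr_alloc_set_opts() call:
		//  m_swrCtx = swr_alloc_set_opts(nullptr,
		//      av_get_default_channel_layout(m_aEncodeCtx->channels), m_aEncodeCtx->sample_fmt, m_aEncodeCtx->sample_rate,
		//      av_get_default_channel_layout(m_aDecodeCtx->channels), m_aDecodeCtx->sample_fmt, m_aDecodeCtx->sample_rate,
		//      0, nullptr);
		//followed by the same swr_init() check.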
	}
	//Open the output file
	if (!(m_oFmtCtx->oformat->flags & AVFMT_NOFILE))
	{
		if (avio_open(&m_oFmtCtx->pb, filePath.c_str(), AVIO_FLAG_WRITE) < 0)
		{
			printf("can not open output file handle!\n");
			return -1;
		}
	}
	//Write the file header
	if (avformat_write_header(m_oFmtCtx, nullptr) < 0)
	{
		printf("can not write the header of the output file!\n");
		return -1;
	}
	return 0;
}

QString ScreenRecordImpl::GetSpeakerDeviceName()
{
	char sName[256] = { 0 };
	QString speaker = "";
	bool bRet = false;
	::CoInitialize(NULL);

	ICreateDevEnum* pCreateDevEnum;//enumerate all speaker (audio renderer) devices
	HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum,
		NULL,
		CLSCTX_INPROC_SERVER,
		IID_ICreateDevEnum,
		(void**)&pCreateDevEnum);

	IEnumMoniker* pEm;
	hr = pCreateDevEnum->CreateClassEnumerator(CLSID_AudioRendererCategory, &pEm, 0);
	if (hr != NOERROR)
	{
		::CoUninitialize();
		return "";
	}

	pEm->Reset();
	ULONG cFetched;
	IMoniker *pM;
	while (hr = pEm->Next(1, &pM, &cFetched), hr == S_OK)
	{

		IPropertyBag* pBag = NULL;
		hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
		if (SUCCEEDED(hr))
		{
			VARIANT var;
			var.vt = VT_BSTR;
			hr = pBag->Read(L"FriendlyName", &var, NULL);//other properties, such as the description, are also available
			if (hr == NOERROR)
			{
				//get the device name
				WideCharToMultiByte(CP_ACP, 0, var.bstrVal, -1, sName, 256, "", NULL);
				speaker = QString::fromLocal8Bit(sName);
				SysFreeString(var.bstrVal);
			}
			pBag->Release();
		}
		pM->Release();
		bRet = true;
	}
	pCreateDevEnum = NULL;
	pEm = NULL;
	::CoUninitialize();
	return speaker;
}

QString ScreenRecordImpl::GetMicrophoneDeviceName()
{
	char sName[256] = { 0 };
	QString capture = "";
	bool bRet = false;
	::CoInitialize(NULL);

	ICreateDevEnum* pCreateDevEnum;//enumerate all audio capture devices
	HRESULT hr = CoCreateInstance(CLSID_SystemDeviceEnum,
		NULL,
		CLSCTX_INPROC_SERVER,
		IID_ICreateDevEnum,
		(void**)&pCreateDevEnum);

	IEnumMoniker* pEm;
	hr = pCreateDevEnum->CreateClassEnumerator(CLSID_AudioInputDeviceCategory, &pEm, 0);
	if (hr != NOERROR)
	{
		::CoUninitialize();
		return "";
	}

	pEm->Reset();
	ULONG cFetched;
	IMoniker *pM;
	while (hr = pEm->Next(1, &pM, &cFetched), hr == S_OK)
	{

		IPropertyBag* pBag = NULL;
		hr = pM->BindToStorage(0, 0, IID_IPropertyBag, (void**)&pBag);
		if (SUCCEEDED(hr))
		{
			VARIANT var;
			var.vt = VT_BSTR;
			hr = pBag->Read(L"FriendlyName", &var, NULL);//other properties, such as the description, are also available
			if (hr == NOERROR)
			{
				//get the device name
				WideCharToMultiByte(CP_ACP, 0, var.bstrVal, -1, sName, 256, "", NULL);
				capture = QString::fromLocal8Bit(sName);
				SysFreeString(var.bstrVal);
			}
			pBag->Release();
		}
		pM->Release();
		bRet = true;
	}
	pCreateDevEnum = NULL;
	pEm = NULL;
	::CoUninitialize();
	return capture;
}

AVFrame* ScreenRecordImpl::AllocAudioFrame(AVCodecContext* c, int nbSamples)
{
	AVFrame *frame = av_frame_alloc();
	int ret;

	frame->format = c->sample_fmt;
	frame->channel_layout = c->channel_layout ? c->channel_layout: AV_CH_LAYOUT_STEREO;
	frame->sample_rate = c->sample_rate;
	frame->nb_samples = nbSamples;

	if (nbSamples)
	{
		ret = av_frame_get_buffer(frame, 0);
		if (ret < 0) 
		{
			qDebug() << "av_frame_get_buffer failed";
			return nullptr;
		}
	}
	return frame;
}

void ScreenRecordImpl::FlushEncoder()
{
	int ret = -1;
	int nFlush = 0;
	AVPacket pkt = { 0 };
	av_init_packet(&pkt);
	ret = avcodec_send_frame(m_aEncodeCtx, nullptr);
	qDebug() << "flush audio avcodec_send_frame ret: " << ret;
	while (ret >= 0)
	{
		ret = avcodec_receive_packet(m_aEncodeCtx, &pkt);
		if (ret < 0)
		{
			av_packet_unref(&pkt);
			if (ret == AVERROR(EAGAIN))
			{
				qDebug() << "flush EAGAIN avcodec_receive_packet";
				ret = 1;
				continue;
			}
			else if (ret == AVERROR_EOF)
			{
				qDebug() << "flush audio encoder finished";
				break;
			}
			qDebug() << "flush audio avcodec_receive_packet failed, ret: " << ret;
			return;
		}
		++nFlush;
		pkt.stream_index = m_aOutIndex;
		ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
		if (ret == 0)
			qDebug() << "flush write audio packet id: " << ++g_encodeFrameCnt;
		else
			qDebug() << "flush audio av_interleaved_write_frame failed, ret: " << ret;
		av_packet_unref(&pkt);
	}
	qDebug() << "flush times: " << nFlush;
}

void ScreenRecordImpl::Release()
{
	if (m_oFmtCtx)
	{
		avio_close(m_oFmtCtx->pb);
		avformat_free_context(m_oFmtCtx);
		m_oFmtCtx = nullptr;
	}
	if (m_aDecodeCtx)
	{
		avcodec_free_context(&m_aDecodeCtx);
		m_aDecodeCtx = nullptr;
	}
	if (m_aEncodeCtx)
	{
		avcodec_free_context(&m_aEncodeCtx);
		m_aEncodeCtx = nullptr;
	}
	if (m_swrCtx)
	{
		swr_free(&m_swrCtx);
		m_swrCtx = nullptr;
	}
	if (m_aFifoBuf)
	{
		av_audio_fifo_free(m_aFifoBuf);
		m_aFifoBuf = nullptr;
	}
	if (m_aFmtCtx)
	{
		avformat_close_input(&m_aFmtCtx);
		m_aFmtCtx = nullptr;
	}
}

void ScreenRecordImpl::RecordAudioThreadProc()
{
	int ret = -1;
	bool done = false;
	int aFrameIndex = 0;

	av_register_all();
	avdevice_register_all();
	avcodec_register_all();

	if (OpenAudio() < 0)
		return;
	if (OpenOutput() < 0)
		return;

	//frame_size: 1152 samples per frame for MP3 (libmp3lame), 1024 for AAC
	m_nbSamples = m_aEncodeCtx->frame_size;
	if (!m_nbSamples)
	{
		qDebug() << "m_nbSamples==0";
		m_nbSamples = 1024;
	}
	m_aFifoBuf = av_audio_fifo_alloc(m_aEncodeCtx->sample_fmt, m_aEncodeCtx->channels, 30 * m_nbSamples);
	if (!m_aFifoBuf)
	{
		qDebug() << "av_audio_fifo_alloc failed";
		return;
	}

	//Start the audio capture thread
	std::thread soundRecord(&ScreenRecordImpl::AcquireSoundThreadProc, this);
	soundRecord.detach();

	while (1)
	{
		if (m_state == RecordState::Stopped && !done)
			done = true;
		if (done)
		{
			lock_guard<mutex> lk(m_mtx);
			if (av_audio_fifo_size(m_aFifoBuf) < m_nbSamples)
				break;
		}
		{
			std::unique_lock<mutex> lk(m_mtx);
			m_cvNotEmpty.wait(lk, [this] {return av_audio_fifo_size(m_aFifoBuf) >= m_nbSamples; });
		}
		int ret = -1;
		AVFrame *aFrame = av_frame_alloc();
		aFrame->nb_samples = m_nbSamples;
		aFrame->channel_layout = m_aEncodeCtx->channel_layout;
		aFrame->format = m_aEncodeCtx->sample_fmt;
		aFrame->sample_rate = m_aEncodeCtx->sample_rate;
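		//pts is counted in the encoder time_base of {1, sample_rate}, so it is simply the
		//number of samples sent so far. This relies on the output stream keeping the same
		//time_base set in OpenOutput(); with other containers the packet timestamps would
		//need av_packet_rescale_ts() before writing.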
		aFrame->pts = aFrameIndex * m_nbSamples;
		++aFrameIndex;
		//Allocate the frame's data buffers
		ret = av_frame_get_buffer(aFrame, 0);

		av_audio_fifo_read(m_aFifoBuf, (void **)aFrame->data, m_nbSamples);
		m_cvNotFull.notify_one();

		AVPacket pkt = { 0 };
		av_init_packet(&pkt);
		//m_aEncodeCtx->frame_size must equal aFrame->nb_samples, otherwise the encoder reports an error
		ret = avcodec_send_frame(m_aEncodeCtx, aFrame);
		if (ret != 0)
		{
			qDebug() << "audio avcodec_send_frame failed, ret: " << ret;
			av_frame_free(&aFrame);
			av_packet_unref(&pkt);
			continue;
		}
		ret = avcodec_receive_packet(m_aEncodeCtx, &pkt);
		if (ret != 0)
		{
			qDebug() << "audio avcodec_receive_packet failed";
			av_frame_free(&aFrame);
			av_packet_unref(&pkt);
			continue;
		}
		pkt.stream_index = m_aOutIndex;
		//pkt.pts = aFrame->pts;
		//pkt.dts = pkt.pts;
		//pkt.duration = m_nbSamples;

		ret = av_interleaved_write_frame(m_oFmtCtx, &pkt);
		if (ret == 0)
			qDebug() << "Write audio packet id: " << ++g_encodeFrameCnt;
		else
			qDebug() << "audio av_interleaved_write_frame failed, ret: " << ret;

		av_frame_free(&aFrame);
		av_packet_unref(&pkt);
	}

	FlushEncoder();
	av_write_trailer(m_oFmtCtx);
	Release();
	qDebug() << "parent thread exit";
}

void ScreenRecordImpl::AcquireSoundThreadProc()
{
	int ret = -1;
	AVPacket pkg = { 0 };
	av_init_packet(&pkg);
	int nbSamples = m_nbSamples;
	int dstNbSamples, maxDstNbSamples;
	AVFrame *rawFrame = av_frame_alloc();
	AVFrame *newFrame = AllocAudioFrame(m_aEncodeCtx, nbSamples);

	maxDstNbSamples = dstNbSamples = av_rescale_rnd(nbSamples, 
		m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);

	while (m_state != RecordState::Stopped)
	{
		if (m_state == RecordState::Paused)
		{
			unique_lock<mutex> lk(m_mtxPause);
			m_cvNotPause.wait(lk, [this] { return m_state != RecordState::Paused; });
		}
		if (av_read_frame(m_aFmtCtx, &pkg) < 0)
		{
			qDebug() << "audio av_read_frame < 0";
			continue;
		}
		if (pkg.stream_index != m_aIndex)
		{
			av_packet_unref(&pkg);
			continue;
		}
		ret = avcodec_send_packet(m_aDecodeCtx, &pkg);
		if (ret != 0)
		{
			av_packet_unref(&pkg);
			continue;
		}
		ret = avcodec_receive_frame(m_aDecodeCtx, rawFrame);
		if (ret != 0)
		{
			av_packet_unref(&pkg);
			continue;
		}
		//Compute the destination sample count, accounting for samples buffered in the resampler
		dstNbSamples = av_rescale_rnd(swr_get_delay(m_swrCtx, m_aDecodeCtx->sample_rate) + rawFrame->nb_samples,
			m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);
		if (dstNbSamples > maxDstNbSamples)
		{
			qDebug() << ">>>";
			av_freep(&newFrame->data[0]);
			//nb_samples*nb_channels*Bytes_sample_fmt
			ret = av_samples_alloc(newFrame->data, newFrame->linesize, m_aEncodeCtx->channels,
				dstNbSamples, m_aEncodeCtx->sample_fmt, 1);
			if (ret < 0)
			{
				qDebug() << "av_samples_alloc failed";
				return;
			}
			maxDstNbSamples = dstNbSamples;
			m_aEncodeCtx->frame_size = dstNbSamples;
			m_nbSamples = newFrame->nb_samples;
		}
		//dstNbSamples = av_rescale_rnd(swr_get_delay(m_swrCtx, m_aDecodeCtx->sample_rate) + rawFrame->nb_samples,
		//	m_aEncodeCtx->sample_rate, m_aDecodeCtx->sample_rate, AV_ROUND_UP);
		//av_assert0(dstNbSamples == rawFrame->nb_samples);
		//ret = av_frame_make_writable(rawFrame);
		//if (ret != 0)
		//{
		//	qDebug() << "av_frame_make_writable failed";
		//	return;
		//}
		newFrame->nb_samples = swr_convert(m_swrCtx, newFrame->data, dstNbSamples,
			(const uint8_t **)rawFrame->data, rawFrame->nb_samples);
		if (newFrame->nb_samples < 0)
		{
			qDebug() << "swr_convert failed";
			return;
		}
		{
			unique_lock<mutex> lk(m_mtx);
			m_cvNotFull.wait(lk, [newFrame, this] { return av_audio_fifo_space(m_aFifoBuf) >= newFrame->nb_samples; });
		}
		if (av_audio_fifo_write(m_aFifoBuf, (void **)newFrame->data, newFrame->nb_samples) < newFrame->nb_samples)
		{
			qDebug() << "av_audio_fifo_write";
			return;
		}
		//m_nbSamples = newFrame->nb_samples;
		m_cvNotEmpty.notify_one();
	}
	av_frame_free(&rawFrame);
	av_frame_free(&newFrame);
	qDebug() << "sound record thread exit";
}
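
The header comment notes that the encoding (parent) thread can finish before the capture (child) thread, and suggests joining the child before releasing resources. A minimal sketch of that change in RecordAudioThreadProc(), assuming the std::thread object is kept instead of being detached:

	//keep the thread object instead of calling soundRecord.detach()
	std::thread soundRecord(&ScreenRecordImpl::AcquireSoundThreadProc, this);
	//... encoding loop as above ...
	FlushEncoder();
	av_write_trailer(m_oFmtCtx);
	if (soundRecord.joinable())
		soundRecord.join();	//capture thread no longer touches m_aFmtCtx / m_aFifoBuf
	Release();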

ScreenRecordTest.h

#pragma once
#include <QObject>
#include <QVariant>

class ScreenRecord : public QObject
{
	Q_OBJECT
public:
	ScreenRecord(QObject *parent = Q_NULLPTR);

private:
	QVariantMap m_args;
};

ScreenRecordTest.cpp

#include "ScreenRecordTest.h"
#include "ScreenRecordImpl.h"
#include <QTimer>

ScreenRecord::ScreenRecord(QObject *parent) :
	QObject(parent)
{
	m_args["filePath"] = "test.mp3";
	m_args["bit_rate"] = 128000;

	ScreenRecordImpl *sr = new ScreenRecordImpl(this);
	sr->Init(m_args);
	QTimer::singleShot(1000, sr, SLOT(Start()));
	QTimer::singleShot(6000, sr, SLOT(Pause()));
	QTimer::singleShot(8000, sr, SLOT(Stop()));
}

main.cpp

#include <QApplication>
#include "ScreenRecordTest.h"

int main(int argc, char *argv[])
{
	QApplication a(argc, argv);

	ScreenRecord sr;

	return a.exec();
}
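
The original post does not include a project file. A rough qmake .pro sketch for building the sources above, assuming the FFmpeg dev headers and libraries are unpacked under a local ffmpeg/ directory (the path is a placeholder):

# Hypothetical project file; adjust FFMPEG_DIR to the local FFmpeg dev package
QT       += core gui widgets multimedia   # multimedia for QAudioDeviceInfo
CONFIG   += c++11
FFMPEG_DIR = $$PWD/ffmpeg
INCLUDEPATH += $$FFMPEG_DIR/include
LIBS += -L$$FFMPEG_DIR/lib \
        -lavcodec -lavformat -lavutil -lavdevice -lswresample -lswscale
# COM / DirectShow libraries for the device-name enumeration
LIBS += -lole32 -loleaut32 -lstrmiids
HEADERS += ScreenRecordImpl.h ScreenRecordTest.h
SOURCES += ScreenRecordImpl.cpp ScreenRecordTest.cpp main.cpp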

 

Source: https://blog.csdn.net/ET_Endeavoring/article/details/88141789
