【C】An example of streaming recording with DirectSound
DirectSound is a component of DirectX, used for playing back sound, recording, and so on. This recording program is really just a modified version of the player code from my previous post.
In fact, all it takes is running find-and-replace over that post's source: replace "DirectSound" with "DirectSoundCapture", then "DIRECTSOUND" with "DIRECTSOUNDCAPTURE", and finally the word "playback" with "recording" in the comments. After the replacements, a few small tweaks are enough to get streaming recording working. Pretty neat.
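For reference, here is a rough sketch of the main playback-to-capture substitutions this amounts to (my own summary, not an exhaustive list):

DirectSoundCreate8 -> DirectSoundCaptureCreate8
IDirectSound8 -> IDirectSoundCapture (LPDIRECTSOUNDCAPTURE8)
IDirectSound8::CreateSoundBuffer -> IDirectSoundCapture::CreateCaptureBuffer
DSBUFFERDESC -> DSCBUFFERDESC
IDirectSoundBuffer8 -> IDirectSoundCaptureBuffer8
IDirectSoundBuffer8::Play -> IDirectSoundCaptureBuffer8::Start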
The code is below. I used the headers and libs from the Microsoft DirectX SDK (March 2008). Src and Bin are available for download; Src contains a preconfigured VC6 project, which you can upgrade to a VS2012 project and so on.
//=============================================================================
//StreamRecord:
//An example of stream-mode recording with DirectSound8, written in C.
//Runs on Windows only. Console program.
//Usage:
//StreamRecord <FormatTag> <Channels> <Bits> <SampleRate> <WAVFile>
//For reusability the code is written in an object-oriented style, and the function
//that reads the waveform from the buffer is kept separate so users can supply their own.
//Author: 0xAA55
//Forum: http://www.0xaa55.com/
//Copyright (C) 2013-2014 技术宅的结界
//Please keep the original author information; removing it is considered infringement.
//-----------------------------------------------------------------------------
#define DIRECTSOUND_VERSION 0x0800
#include<windows.h>
#include<stdio.h>
#include<stdlib.h>
#include<signal.h>
#include<io.h> //for unlink
#include<dsound.h>
#pragma comment(lib,"dsound.lib")
#pragma comment(lib,"dxguid.lib") //DSDEVID_DefaultCapture and the IIDs live here
#define V(action) if(FAILED(hr=(action))){SRCleanup(p);fprintf(stderr,#action"==0x%08X\n",hr);return hr;}
#define VN(action) if(!(action)){SRCleanup(p);fputs(#action" failed\n",stderr);return E_FAIL;}
#define SR_BufDuration 400 //buffer length in milliseconds
typedef void(*PFNREADBUFFERCALLBACK)(void*pBuffer,UINT uBufferSize);//prototype of the callback that receives captured waveform data from the buffer
int g_Quit=0; //set to 1 when Ctrl+C is pressed
#pragma pack(push,1)
struct
{
BYTE RIFFFlag[4];
DWORD dwRIFFVal;
BYTE WAVEFlag[4];
BYTE fmt_Flag[4];
DWORD dwfmtChunkSize;
PCMWAVEFORMAT fmtChunk;
BYTE dataFlag[4];
DWORD dwSampleBytes;
}g_WAVHeader={
{'R','I','F','F'},
0,
{'W','A','V','E'},
{'f','m','t',' '},
sizeof(PCMWAVEFORMAT),
{0},
{'d','a','t','a'},
0
};
#pragma pack(pop)
FILE* g_fp=NULL;
UINT g_uCurPos=sizeof(g_WAVHeader);
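//The header itself is only written once recording has finished (at the end of main),
//because dwRIFFVal and dwSampleBytes are not known until then; g_uCurPos therefore
//starts just past the header so the sample data lands in the right place.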
//=============================================================================
//ReadBuffer:
//Read the recorded waveform from the capture buffer and append it to the WAV file
//-----------------------------------------------------------------------------
void ReadBuffer(void*pBuffer,UINT uBufferSize)
{
fseek(g_fp,g_uCurPos,SEEK_SET);
fwrite(pBuffer,1,uBufferSize,g_fp);
g_uCurPos+=uBufferSize;
g_WAVHeader.dwSampleBytes+=uBufferSize;
printf("Total samples:%d\t\t\r",g_WAVHeader.dwSampleBytes);
}
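//Note: this default callback simply appends the raw captured bytes to the output
//file. Because the callback is a parameter of SRInit, a user-defined one could
//instead encode the data or stream it somewhere else.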
//=============================================================================
//StreamRecord:
//The object that performs recording via DirectSound
//-----------------------------------------------------------------------------
typedef struct
{
DSCCAPS Caps; //capture device capabilities
LPDIRECTSOUNDCAPTURE8 pDSC8; //the capture device object
LPDIRECTSOUNDCAPTUREBUFFER pDSCB; //capture buffer
LPDIRECTSOUNDNOTIFY pDSN; //notification generator
LPDIRECTSOUNDCAPTUREBUFFER8 pDSCB8; //capture buffer (version 8 interface)
DSBPOSITIONNOTIFY DSBPositionNotify; //capture position notification
PFNREADBUFFERCALLBACK pfnReadBufferCallBack; //callback that receives captured waveform data from the buffer
}StreamRecord;
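//Typical usage: call SRInit once, call SRUpdate repeatedly from the main loop to
//drain freshly captured data, and call SRCleanup when finished (see main below).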
//=============================================================================
//SRCleanup:
//Release the resources when finished
//-----------------------------------------------------------------------------
void SRCleanup
(
StreamRecord *p //the structure to clean up
)
{
if(p->DSBPositionNotify.hEventNotify)//the event may not exist if SRInit failed early
{
CloseHandle(p->DSBPositionNotify.hEventNotify);
p->DSBPositionNotify.hEventNotify=NULL;
}
if(p->pDSCB8)
{
IDirectSoundCaptureBuffer8_Stop(p->pDSCB8);
IDirectSoundCaptureBuffer8_Release(p->pDSCB8);
p->pDSCB8=NULL;
}
if(p->pDSN)
{
IDirectSoundNotify_Release(p->pDSN);
p->pDSN=NULL;
}
if(p->pDSCB)
{
IDirectSoundCaptureBuffer_Release(p->pDSCB);
p->pDSCB=NULL;
}
if(p->pDSC8)
{
IDirectSoundCapture_Release(p->pDSC8);
p->pDSC8=NULL;
}
}
//=============================================================================
//SRProcBuffer:
//Read the captured audio data out of the capture buffer
//-----------------------------------------------------------------------------
HRESULT SRProcBuffer
(
StreamRecord *p //the recorder whose buffer is to be read
)
{
HRESULT hr;
LPVOID//the two lock regions of the circular buffer
pBuf1=NULL,
pBuf2=NULL;
DWORD//sizes of the two lock regions
dwBuf1Size=0,
dwBuf2Size=0;
V(IDirectSoundCaptureBuffer8_Lock(p->pDSCB8,0,0,&pBuf1,&dwBuf1Size,&pBuf2,&dwBuf2Size,DSCBLOCK_ENTIREBUFFER));//lock the whole buffer; a lock can come back as two regions, though starting at offset 0 the second is normally NULL
if(p->pfnReadBufferCallBack)//if the user supplied a callback, hand the captured wave data to it
{
p->pfnReadBufferCallBack(pBuf1,dwBuf1Size);
if(pBuf2)
p->pfnReadBufferCallBack(pBuf2,dwBuf2Size);
}
V(IDirectSoundCaptureBuffer8_Unlock(p->pDSCB8,pBuf1,dwBuf1Size,pBuf2,dwBuf2Size));
return hr;
}
//=============================================================================
//SRInit:
//Initialize a StreamRecord
//-----------------------------------------------------------------------------
HRESULT SRInit
(
StreamRecord *p, //[out] the structure to initialize
HWND hWnd, //[in] focus window (not used by the capture code; kept from the playback example)
DWORD dwBufferSize, //[in] capture buffer size in bytes
PCMWAVEFORMAT *pFormat, //[in] waveform format; the required parameters are taken from it
PFNREADBUFFERCALLBACK pfnReadBufferCallBack //[in] callback that receives the captured data
)
{
HRESULT hr;
DSCBUFFERDESC DSCBufferDesc;//capture buffer descriptor
WAVEFORMATEX WaveFormatEx;//WAV format
p->Caps.dwSize=sizeof(p->Caps);
p->pfnReadBufferCallBack=pfnReadBufferCallBack;//callback used to consume the captured waveform data
V(DirectSoundCaptureCreate8(&DSDEVID_DefaultCapture,&(p->pDSC8),NULL));//open the default capture device; DSERR_ALLOCATED here usually means the device is already in use
V(IDirectSoundCapture_GetCaps(p->pDSC8,&(p->Caps)));//query the capture device capabilities
DSCBufferDesc.dwSize=sizeof(DSCBufferDesc);
DSCBufferDesc.dwFlags=0;
DSCBufferDesc.dwBufferBytes=dwBufferSize;//buffer size in bytes
DSCBufferDesc.dwReserved=0;
DSCBufferDesc.lpwfxFormat=&WaveFormatEx;//waveform format
WaveFormatEx.wFormatTag=pFormat->wf.wFormatTag;
WaveFormatEx.nChannels=pFormat->wf.nChannels;
WaveFormatEx.nSamplesPerSec=pFormat->wf.nSamplesPerSec;
WaveFormatEx.nAvgBytesPerSec=pFormat->wf.nAvgBytesPerSec;
WaveFormatEx.nBlockAlign=pFormat->wf.nBlockAlign;
WaveFormatEx.wBitsPerSample=pFormat->wBitsPerSample;
WaveFormatEx.cbSize=0;//no extra bytes follow the WAVEFORMATEX structure (ignored for plain PCM)
DSCBufferDesc.dwFXCount=0;
DSCBufferDesc.lpDSCFXDesc=NULL;
V(IDirectSoundCapture_CreateCaptureBuffer(p->pDSC8,&DSCBufferDesc,&(p->pDSCB),NULL));//create the capture buffer
V(IDirectSoundCaptureBuffer_QueryInterface(p->pDSCB,&IID_IDirectSoundNotify,(void**)&(p->pDSN)));//get the notification interface
V(IDirectSoundCaptureBuffer_QueryInterface(p->pDSCB,&IID_IDirectSoundCaptureBuffer8,(void**)&(p->pDSCB8)));//get the version-8 buffer interface
IDirectSoundCaptureBuffer_Release(p->pDSCB);//the original interface is no longer needed
p->pDSCB=NULL;
p->DSBPositionNotify.dwOffset=0;//read the data each time the capture cursor reaches the start of the buffer (once per loop)
VN(p->DSBPositionNotify.hEventNotify=CreateEvent(NULL,FALSE,FALSE,NULL));
V(IDirectSoundNotify_SetNotificationPositions(p->pDSN,1,&(p->DSBPositionNotify)));
V(SRProcBuffer(p));
V(IDirectSoundCaptureBuffer8_Start(p->pDSCB8,DSCBSTART_LOOPING));//start capturing in looping mode
return hr;
}
//=============================================================================
//SRUpdate:
//Check whether newly captured data is ready to be read
//-----------------------------------------------------------------------------
HRESULT SRUpdate
(
StreamRecord *p
)
{
HRESULT hr=S_OK;//default to S_OK so we return success when no data is ready yet
if(WaitForSingleObject(p->DSBPositionNotify.hEventNotify,0)!=WAIT_TIMEOUT)//has the notification fired, i.e. is new data ready in the buffer?
V(SRProcBuffer(p));
return hr;
}
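//Note: the zero timeout above makes SRUpdate a pure polling call, and main simply
//calls it in a tight loop. Waiting on the event with a longer timeout (or adding a
//short Sleep) would reduce CPU usage.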
void Signal(int sig)//SIGINT handler: set the quit flag so main can exit cleanly
{
switch(sig)
{
case SIGINT:
g_Quit=1;
fputs("Ctrl+C\n",stderr);
break;
}
}
int main(int argc,char**argv)
{
HRESULT hr;
StreamRecord SR={0};
//find the console window first, to use as the focus window
HWND hWnd=FindWindow(TEXT("ConsoleWindowClass"),NULL);
if(!hWnd)
{
fputs("Could not get the console window handle.\n",stderr);
return 2;
}
if(argc<6)
{
fputs(
"Usage:\n"
"StreamRecord <FormatTag> <Channels> <Bits> <SampleRate> <WAVFile>\n"
"FormatTag: 1:Normal PCM2:High quality PCM 3:IEEE Floating Point PCM\n"
"Channels: 1:Mono 2:Stereo\n"
"SampleRate: Samples per second\n"
"Bits: Can be 8/16/24/32/48/64 etc.\n",stderr);
return 1;
}
signal(SIGINT,Signal);
g_WAVHeader.fmtChunk.wf.wFormatTag= (WORD)atoi(argv[1]);
g_WAVHeader.fmtChunk.wf.nChannels= (WORD)atoi(argv[2]);
g_WAVHeader.fmtChunk.wBitsPerSample= (WORD)atoi(argv[3]);
g_WAVHeader.fmtChunk.wf.nSamplesPerSec= (DWORD)atoi(argv[4]);
g_WAVHeader.fmtChunk.wf.nBlockAlign= g_WAVHeader.fmtChunk.wBitsPerSample*g_WAVHeader.fmtChunk.wf.nChannels/8;
g_WAVHeader.fmtChunk.wf.nAvgBytesPerSec=g_WAVHeader.fmtChunk.wf.nSamplesPerSec*g_WAVHeader.fmtChunk.wf.nBlockAlign;
g_fp=fopen(argv[5],"wb");
if(!g_fp)
{
fprintf(stderr,"Could not write file:%s\n",argv);
return 2;
}
//initialize the recorder
if(FAILED(hr=SRInit(&SR,hWnd,g_WAVHeader.fmtChunk.wf.nAvgBytesPerSec*SR_BufDuration/1000,&g_WAVHeader.fmtChunk,ReadBuffer)))
{
SRCleanup(&SR);
fclose(g_fp);
unlink(argv[5]);//delete the incomplete output file
return 2;
}
while(!g_Quit)
{
hr=SRUpdate(&SR);//keep checking whether captured data needs to be saved
}
SRCleanup(&SR);
g_WAVHeader.dwRIFFVal=g_uCurPos-8;//RIFF chunk size = total file size minus the 8-byte 'RIFF' header
fseek(g_fp,0,SEEK_SET);
fwrite(&g_WAVHeader,1,sizeof(g_WAVHeader),g_fp);//now that the sizes are known, write the real header
fclose(g_fp);
return 0;
}
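To record, run the program from a console and press Ctrl+C to stop; the WAV header is finalized on exit. For example (test.wav is just a placeholder name), recording 16-bit stereo integer PCM at 44100 Hz looks like this:

StreamRecord 1 2 16 44100 test.wav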