General information
This article describes how to receive frames from a third-party SDK, using the Tencent Real-Time Communication (TRTC) SDK as an example.
The TRTC SDK delivers video and audio frames in two separate callbacks, onRenderVideoFrame() and onPlayAudioFrame() respectively. The best option is therefore to clone the video frame in a dedicated Medialooks thread, so that we control the output frame rate, and to collect the audio in a buffer, so that we can take exactly the number of audio samples we need for each frame, as shown in the diagram.
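The snippets below rely on a few class-level members: the Medialooks factory, the latest frame shared between the TRTC callback and the worker thread, a lock object, the frame properties structure, an audio buffer object, and a preview object that receives the resulting frames. Here is a minimal sketch of these declarations; the member names match the code below, but the concrete coclass names (in particular the audio buffer class) are assumptions, so check them against your MFormats SDK version:
// using MFORMATSLib;  // Medialooks interop namespace (assumed)
private readonly object m_objLock = new object();        // guards access to pFrame
private MFFrame pFrame = null;                           // latest video frame received from TRTC
private M_AV_PROPS FrameProps = new M_AV_PROPS();        // video/audio properties of that frame
private MFFactoryClass m_Factory = new MFFactoryClass(); // frame factory
private MFAudioBufferClass m_objAudioBuffer = new MFAudioBufferClass(); // audio buffer (assumed coclass name)
private MFPreviewClass m_Preview = new MFPreviewClass(); // receiver that consumes the cloned frames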
Code Example
Create a frame from a memory pointer using the MFFrameCreateFromMem() method:
public void onRenderVideoFrame(string userId, TRTCVideoStreamType streamType, TRTCVideoFrame frame)
{
    var width = (int)frame.width;
    var height = (int)frame.height;

    // The frame is treated as 32-bit BGRA, i.e. 4 bytes per pixel
    PixelFormat pixelFormat = PixelFormats.Pbgra32;
    int bytesPerPixel = (pixelFormat.BitsPerPixel + 7) / 8;
    int stride = bytesPerPixel * width;

    // Describe the incoming frame for the Medialooks factory
    FrameProps.vidProps.dblRate = 50; // output frame rate used by the cloning thread
    FrameProps.vidProps.fccType = eMFCC.eMFCC_ARGB32;
    FrameProps.vidProps.nWidth = width;
    FrameProps.vidProps.nHeight = -height; // negative height marks the buffer as top-down
    FrameProps.vidProps.nRowBytes = stride;

    IntPtr inp = Marshal.UnsafeAddrOfPinnedArrayElement(frame.data, 0);
    lock (m_objLock)
    {
        // Release the previous frame and replace it with the new one
        if (pFrame != null)
            Marshal.ReleaseComObject(pFrame);
        m_Factory.MFFrameCreateFromMem(ref FrameProps, inp.ToInt64(), 0, 0, out pFrame, "");
    }
}
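Note that Marshal.UnsafeAddrOfPinnedArrayElement() does not pin the array itself; it only returns its current address and assumes the buffer is already pinned. If the garbage collector moves frame.data before MFFrameCreateFromMem() has copied the pixels, the pointer becomes stale. A safer variant of the same call (a sketch, not part of the original sample) pins the buffer explicitly with GCHandle:
// Pin the managed TRTC buffer for the duration of the copy
GCHandle handle = GCHandle.Alloc(frame.data, GCHandleType.Pinned);
try
{
    lock (m_objLock)
    {
        if (pFrame != null)
            Marshal.ReleaseComObject(pFrame);
        m_Factory.MFFrameCreateFromMem(ref FrameProps, handle.AddrOfPinnedObject().ToInt64(), 0, 0, out pFrame, "");
    }
}
finally
{
    handle.Free(); // unpin the array once the SDK has copied the data
}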
Fill the audio buffer using the BufferPutPtr() method:
public void onPlayAudioFrame(TRTCAudioFrame frame, string userId)
{
    int sampleRate = (int)frame.sampleRate;

    // Describe the incoming PCM data: 16-bit samples, as delivered by TRTC
    M_AUD_PROPS audioProps = new M_AUD_PROPS();
    audioProps.nChannels = (int)frame.channel;
    audioProps.nBitsPerSample = 16;
    audioProps.nSamplesPerSec = sampleRate;

    // frame.length is in bytes; convert it to a number of samples
    var audioSamples = (int)frame.length / (audioProps.nBitsPerSample / 8 * audioProps.nChannels);

    IntPtr aud = Marshal.UnsafeAddrOfPinnedArrayElement(frame.data, 0);
    m_objAudioBuffer.BufferPutPtr("", ref audioProps, audioSamples, aud.ToInt64(), "");
}
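For example, with 48000 Hz stereo 16-bit audio and a 20 ms audio frame (illustrative numbers, not fixed by the SDK), frame.length is 3840 bytes, so audioSamples = 3840 / (16 / 8 * 2) = 960 samples are put into the buffer.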
Next, in the worker thread we clone the previously created frame and fill it with audio from the buffer using the BufferFrameFill() method:
private void thread_DoWork(CancellationToken token)
{
    while (!token.IsCancellationRequested)
    {
        MFFrame threadFrame = null;
        lock (m_objLock)
        {
            // Clone the latest video frame received from TRTC, if any
            if (pFrame != null)
                pFrame.MFClone(out threadFrame, eMFrameClone.eMFC_Full, eMFCC.eMFCC_Default);
        }
        if (threadFrame == null)
        {
            // No frame has arrived yet; wait outside the lock so the callback is not blocked
            Thread.Sleep(1);
            continue;
        }

        int bufferAudioSamples = 0;
        m_objAudioBuffer.BufferPropsGet("", out FrameProps.audProps, out bufferAudioSamples);
        if (bufferAudioSamples != 0 && FrameProps.vidProps.dblRate != 0)
        {
            // Number of audio samples that corresponds to one video frame
            int samplesNeed = Convert.ToInt32(FrameProps.audProps.nSamplesPerSec / FrameProps.vidProps.dblRate);

            // Check that the buffer contains enough audio samples
            if (bufferAudioSamples >= samplesNeed)
            {
                // Fill the frame with audio data from the buffer
                m_objAudioBuffer.BufferFrameFill("", threadFrame, ref samplesNeed, "");
                // Remove the used samples from the buffer
                m_objAudioBuffer.BufferRemove("", ref samplesNeed);
            }
            // If more than two frames' worth of audio has accumulated, drop one extra portion
            // so that the buffer does not grow without limit
            if (bufferAudioSamples > samplesNeed * 2)
            {
                m_objAudioBuffer.BufferRemove("", ref samplesNeed);
            }
        }

        // Push the cloned frame with its audio to the preview (or any other Medialooks receiver)
        m_Preview.ReceiverFramePut(threadFrame, -1, "");
        Marshal.ReleaseComObject(threadFrame);
    }
}
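Finally, the worker thread has to be started once the TRTC callbacks are registered and stopped on shutdown. A minimal sketch, assuming the thread_DoWork() method above; the task-based wrapper and the StartWorker()/StopWorker() names are illustrative, not part of the original sample:
// using System.Threading; using System.Threading.Tasks;
private CancellationTokenSource m_cts;
private Task m_workerTask;

private void StartWorker()
{
    // Run the cloning loop on a background task
    m_cts = new CancellationTokenSource();
    m_workerTask = Task.Run(() => thread_DoWork(m_cts.Token));
}

private void StopWorker()
{
    // Signal cancellation and wait for the loop to finish
    m_cts.Cancel();
    m_workerTask.Wait();
    m_cts.Dispose();
}
With a 48 kHz source and the 50 fps rate set in onRenderVideoFrame(), each cloned frame is filled with 48000 / 50 = 960 audio samples before it is passed to ReceiverFramePut().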