Browse Source

添加: 新的录屏

zhuizhu 7 months ago
parent
commit
d5a85d15bd
94 changed files with 14157 additions and 0 deletions
  1. 476 0
      libs/Recorder/AecKsBinder.cpp
  2. 104 0
      libs/Recorder/AecKsBinder.h
  3. 1005 0
      libs/Recorder/Recorder.cpp
  4. 423 0
      libs/Recorder/Recorder.vcxproj
  5. 344 0
      libs/Recorder/Recorder.vcxproj.filters
  6. 9 0
      libs/Recorder/common.h
  7. 57 0
      libs/Recorder/d3d_helper.cpp
  8. 18 0
      libs/Recorder/d3d_helper.h
  9. 24 0
      libs/Recorder/d3d_pixelshader.hlsl
  10. 28 0
      libs/Recorder/d3d_vertexshader.hlsl
  11. 251 0
      libs/Recorder/device_audios.cpp
  12. 33 0
      libs/Recorder/device_audios.h
  13. 11 0
      libs/Recorder/device_videos.cpp
  14. 8 0
      libs/Recorder/device_videos.h
  15. 84 0
      libs/Recorder/dllmain.cpp
  16. 332 0
      libs/Recorder/encoder_aac.cpp
  17. 92 0
      libs/Recorder/encoder_aac.h
  18. 70 0
      libs/Recorder/encoder_video.cpp
  19. 83 0
      libs/Recorder/encoder_video.h
  20. 24 0
      libs/Recorder/encoder_video_define.h
  21. 43 0
      libs/Recorder/encoder_video_factory.cpp
  22. 15 0
      libs/Recorder/encoder_video_factory.h
  23. 237 0
      libs/Recorder/encoder_video_nvenc.cpp
  24. 46 0
      libs/Recorder/encoder_video_nvenc.h
  25. 222 0
      libs/Recorder/encoder_video_x264.cpp
  26. 39 0
      libs/Recorder/encoder_video_x264.h
  27. 218 0
      libs/Recorder/error_define.h
  28. 501 0
      libs/Recorder/export.cpp
  29. 295 0
      libs/Recorder/export.h
  30. 13 0
      libs/Recorder/filter.cpp
  31. 44 0
      libs/Recorder/filter.h
  32. 268 0
      libs/Recorder/filter_amix.cpp
  33. 57 0
      libs/Recorder/filter_amix.h
  34. 231 0
      libs/Recorder/filter_aresample.cpp
  35. 59 0
      libs/Recorder/filter_aresample.h
  36. 206 0
      libs/Recorder/hardware_acceleration.cpp
  37. 37 0
      libs/Recorder/hardware_acceleration.h
  38. 22 0
      libs/Recorder/headers_ffmpeg.h
  39. 4 0
      libs/Recorder/headers_mmdevice.cpp
  40. 31 0
      libs/Recorder/headers_mmdevice.h
  41. 68 0
      libs/Recorder/log_helper.cpp
  42. 63 0
      libs/Recorder/log_helper.h
  43. 498 0
      libs/Recorder/main.cpp
  44. 28 0
      libs/Recorder/mul_db.h
  45. 63 0
      libs/Recorder/muxer_define.h
  46. 907 0
      libs/Recorder/muxer_ffmpeg.cpp
  47. 100 0
      libs/Recorder/muxer_ffmpeg.h
  48. 26 0
      libs/Recorder/muxer_file.cpp
  49. 59 0
      libs/Recorder/muxer_file.h
  50. 28 0
      libs/Recorder/record_audio.cpp
  51. 97 0
      libs/Recorder/record_audio.h
  52. 18 0
      libs/Recorder/record_audio_define.h
  53. 244 0
      libs/Recorder/record_audio_dshow.cpp
  54. 43 0
      libs/Recorder/record_audio_dshow.h
  55. 43 0
      libs/Recorder/record_audio_factory.cpp
  56. 19 0
      libs/Recorder/record_audio_factory.h
  57. 615 0
      libs/Recorder/record_audio_wasapi.cpp
  58. 88 0
      libs/Recorder/record_audio_wasapi.h
  59. 23 0
      libs/Recorder/record_desktop.cpp
  60. 80 0
      libs/Recorder/record_desktop.h
  61. 45 0
      libs/Recorder/record_desktop_define.h
  62. 893 0
      libs/Recorder/record_desktop_duplication.cpp
  63. 92 0
      libs/Recorder/record_desktop_duplication.h
  64. 53 0
      libs/Recorder/record_desktop_factory.cpp
  65. 10 0
      libs/Recorder/record_desktop_factory.h
  66. 235 0
      libs/Recorder/record_desktop_ffmpeg_dshow.cpp
  67. 37 0
      libs/Recorder/record_desktop_ffmpeg_dshow.h
  68. 238 0
      libs/Recorder/record_desktop_ffmpeg_gdi.cpp
  69. 39 0
      libs/Recorder/record_desktop_ffmpeg_gdi.h
  70. 300 0
      libs/Recorder/record_desktop_gdi.cpp
  71. 51 0
      libs/Recorder/record_desktop_gdi.h
  72. 389 0
      libs/Recorder/record_desktop_mag.cpp
  73. 93 0
      libs/Recorder/record_desktop_mag.h
  74. 154 0
      libs/Recorder/record_desktop_wgc.cpp
  75. 67 0
      libs/Recorder/record_desktop_wgc.h
  76. 294 0
      libs/Recorder/remuxer_ffmpeg.cpp
  77. 56 0
      libs/Recorder/remuxer_ffmpeg.h
  78. 96 0
      libs/Recorder/resample_pcm.cpp
  79. 33 0
      libs/Recorder/resample_pcm.h
  80. 96 0
      libs/Recorder/ring_buffer.cpp
  81. 120 0
      libs/Recorder/ring_buffer.h
  82. 97 0
      libs/Recorder/sws_helper.cpp
  83. 36 0
      libs/Recorder/sws_helper.h
  84. 38 0
      libs/Recorder/system_error.cpp
  85. 12 0
      libs/Recorder/system_error.h
  86. 48 0
      libs/Recorder/system_lib.cpp
  87. 10 0
      libs/Recorder/system_lib.h
  88. 32 0
      libs/Recorder/system_time.cpp
  89. 19 0
      libs/Recorder/system_time.h
  90. 165 0
      libs/Recorder/system_version.cpp
  91. 37 0
      libs/Recorder/system_version.h
  92. 1009 0
      libs/Recorder/transcode_aac.cpp
  93. 66 0
      libs/Recorder/utils_string.cpp
  94. 23 0
      libs/Recorder/utils_string.h

+ 476 - 0
libs/Recorder/AecKsBinder.cpp

@@ -0,0 +1,476 @@
+//-------------------------------------------------------------------------
+// File: AecKsBinder.cpp
+// 
+// Desciption: Definition of audio devices binding functions 
+//
+// Copyright (c) 2004-2006, Microsoft Corporation. All rights reserved.
+//---------------------------------------------------------------------------
+#include "stdafx.h"
+#include "AecKsbinder.h"
+#include "strsafe.h"
+#include "functiondiscoverykeys.h"           // PKEY_Device_FriendlyName
+
+#ifndef IF_FAILED_JUMP
+#define IF_FAILED_JUMP(hr, label) if(FAILED(hr)) goto label;
+#endif 
+
+#ifndef IF_FAILED_RETURN
+#define IF_FAILED_RETURN(hr) if(FAILED(hr)) return hr;
+#endif 
+
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Function:
+//      DeviceBindTo
+//
+// Description:
+//      Bind device to an IAudioClient interface.
+//
+//  Parameters:
+//      eDataFlow: eRender for render device, eCapture for capture device
+//      iDevIdx: Index of device in the enumeration list. If it is -1, use default device 
+//      ppVoid: pointer pointer to IAudioClient interface.
+//      ppszEndpointDeviceId: Device ID. Caller is responsible for freeing memeory
+//              using CoTaskMemoryFree. If can be NULL if called doesn't need this info.
+//
+// Return:
+//      S_OK if successful
+//
+///////////////////////////////////////////////////////////////////////////////
+HRESULT DeviceBindTo(
+        EDataFlow eDataFlow,    // eCapture/eRender
+        INT iDevIdx,        // Device Index. -1 - default device. 
+        IAudioClient **ppAudioClient,    // pointer pointer to IAudioClient interface
+        IAudioEndpointVolume **ppEndpointVolume,
+        WCHAR** ppszEndpointDeviceId)   // Device ID. Need to be freed in caller with CoTaskMemoryFree if it is not NULL
+{
+    HRESULT hResult;
+
+    CComPtr<IMMDeviceEnumerator> spEnumerator;
+    CComPtr<IMMDeviceCollection> spEndpoints;    
+    CComPtr<IMMDevice> spDevice;
+    WCHAR *pszDeviceId = NULL;
+
+    if (ppAudioClient == NULL) 
+        return E_POINTER;
+    
+    *ppAudioClient = NULL;
+
+    hResult = spEnumerator.CoCreateInstance(__uuidof(MMDeviceEnumerator));
+    IF_FAILED_JUMP(hResult, Exit);
+
+    // use default device
+    if (iDevIdx < 0 )
+    {
+        hResult = spEnumerator->GetDefaultAudioEndpoint(eDataFlow, eConsole, &spDevice);
+        IF_FAILED_JUMP(hResult, Exit);
+    }else{
+        // User selected device
+        hResult = spEnumerator->EnumAudioEndpoints(eDataFlow, DEVICE_STATE_ACTIVE, &spEndpoints);
+        IF_FAILED_JUMP(hResult, Exit);
+
+        hResult = spEndpoints->Item(iDevIdx, &spDevice);
+        IF_FAILED_JUMP(hResult, Exit);
+    }
+
+    // get device ID and format
+    hResult = spDevice->GetId(&pszDeviceId);
+    IF_FAILED_JUMP(hResult, Exit);
+
+    // Active device
+    hResult = spDevice->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL, (void**)ppAudioClient);
+    IF_FAILED_JUMP(hResult, Exit);
+
+    if (ppEndpointVolume)
+    {
+        hResult = spDevice->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_INPROC_SERVER, NULL, (void **)ppEndpointVolume);
+        IF_FAILED_JUMP(hResult, Exit);
+    }
+    
+Exit:
+    if (ppszEndpointDeviceId)
+        *ppszEndpointDeviceId = pszDeviceId;
+    else if (pszDeviceId)
+        CoTaskMemFree(pszDeviceId);
+    
+    return hResult;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// Function:
+//      GetDeviceNum
+//
+// Description:
+//      Enumerate audio device and return the device information.
+//
+//  Parameters:
+//      eDataFlow: eRender for render device, eCapture for capture device
+//      uDevCount: Number of device
+//
+// Return:
+//      S_OK if successful
+//
+///////////////////////////////////////////////////////////////////////////////
+HRESULT GetDeviceNum(EDataFlow eDataFlow, UINT &uDevCount)
+{
+    HRESULT hResult = S_OK;
+
+    CComPtr<IMMDeviceEnumerator> spEnumerator;
+    CComPtr<IMMDeviceCollection> spEndpoints;    
+
+    hResult = spEnumerator.CoCreateInstance(__uuidof(MMDeviceEnumerator));
+    IF_FAILED_RETURN(hResult);
+
+    hResult = spEnumerator->EnumAudioEndpoints(eDataFlow, DEVICE_STATE_ACTIVE, &spEndpoints);
+    IF_FAILED_RETURN(hResult);
+
+    hResult = spEndpoints->GetCount(&uDevCount);
+    IF_FAILED_RETURN(hResult);
+
+    return hResult;
+
+}
+
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Function:
+//      EnumDevice
+//
+// Description:
+//      Enumerate audio device and return the device information.
+//
+//  Parameters:
+//      eDataFlow: eRender for render device, eCapture for capture device
+//      uNumElements: Size of audio device info structure array.
+//      pDevicInfo: device info structure array. Caller is responsible to allocate and free
+//                  memory. The array size is specified by uNumElements.
+//
+// Return:
+//      S_OK if successful
+//
+///////////////////////////////////////////////////////////////////////////////
+HRESULT EnumDevice(EDataFlow eDataFlow, UINT uNumElements, AUDIO_DEVICE_INFO *pDevicInfo)
+{
+    HRESULT hResult = S_OK;
+    TCHAR* pszDeviceId = NULL;
+    PROPVARIANT value;
+    UINT index, dwCount;
+    bool IsMicArrayDevice;
+
+    CComPtr<IMMDeviceEnumerator> spEnumerator;
+    CComPtr<IMMDeviceCollection> spEndpoints;    
+
+    hResult = spEnumerator.CoCreateInstance(__uuidof(MMDeviceEnumerator));
+    IF_FAILED_JUMP(hResult, Exit);
+
+    hResult = spEnumerator->EnumAudioEndpoints(eDataFlow, DEVICE_STATE_ACTIVE, &spEndpoints);
+    IF_FAILED_JUMP(hResult, Exit);
+
+    hResult = spEndpoints->GetCount(&dwCount);
+    IF_FAILED_JUMP(hResult, Exit);
+
+    if (dwCount != uNumElements)
+        return E_INVALIDARG;
+
+    ZeroMemory(pDevicInfo, sizeof(AUDIO_DEVICE_INFO)*uNumElements);
+    
+    for (index = 0; index < dwCount; index++)
+    {
+        CComPtr<IMMDevice> spDevice;
+        CComPtr<IPropertyStore> spProperties;
+
+        PropVariantInit(&value);
+
+        hResult = spEndpoints->Item(index, &spDevice);
+        IF_FAILED_JUMP(hResult, Exit);
+         
+        hResult = spDevice->GetId(&pszDeviceId);
+        IF_FAILED_JUMP(hResult, Exit);
+
+        hResult = spDevice->OpenPropertyStore(STGM_READ, &spProperties);
+        IF_FAILED_JUMP(hResult, Exit);
+
+        hResult = spProperties->GetValue(PKEY_Device_FriendlyName, &value);
+        IF_FAILED_JUMP(hResult, Exit);
+
+        EndpointIsMicArray(spDevice, IsMicArrayDevice);
+
+        StringCchCopy(pDevicInfo[index].szDeviceID, MAX_STR_LEN-1, pszDeviceId);
+        StringCchCopy(pDevicInfo[index].szDeviceName, MAX_STR_LEN-1, value.pszVal);
+        pDevicInfo[index].bIsMicArrayDevice = IsMicArrayDevice;
+        
+        PropVariantClear(&value);
+        CoTaskMemFree(pszDeviceId);
+        pszDeviceId = NULL;
+    }
+
+Exit:
+    return hResult;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Function:
+//      DeviceIsMicArray
+//
+// Description:
+//      Determines if a given IMMDevice is a microphone array by device ID
+//
+// Returns:
+//      S_OK on success
+///////////////////////////////////////////////////////////////////////////////
+HRESULT DeviceIsMicArray(wchar_t szDeviceId[], bool &bIsMicArray)
+{
+    HRESULT hr = S_OK;
+    
+    if (szDeviceId == NULL)
+        return E_INVALIDARG;
+   
+    CComPtr<IMMDeviceEnumerator> spEnumerator;
+    CComPtr<IMMDevice> spDevice;
+
+    hr = spEnumerator.CoCreateInstance(__uuidof(MMDeviceEnumerator));
+    IF_FAILED_RETURN(hr);
+
+    hr = spEnumerator->GetDevice(szDeviceId, &spDevice);
+    IF_FAILED_RETURN(hr);
+
+    hr = EndpointIsMicArray(spDevice, bIsMicArray);
+
+    return hr;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Function:
+//      EndpointIsMicArray
+//
+// Description:
+//      Determines if a given IMMDevice is a microphone array by Endpoint pointer
+//
+// Returns:
+//      S_OK on success
+///////////////////////////////////////////////////////////////////////////////
+HRESULT EndpointIsMicArray(IMMDevice* pEndpoint, bool & isMicrophoneArray)
+{
+    if (pEndpoint == NULL)
+        return E_POINTER;
+
+    GUID subType = {0};
+
+    HRESULT hr = GetJackSubtypeForEndpoint(pEndpoint, &subType);
+
+    isMicrophoneArray = (subType == KSNODETYPE_MICROPHONE_ARRAY) ? true : false;
+
+    return hr;
+}// EndpointIsMicArray()
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Function:
+//      GetJackSubtypeForEndpoint
+//
+// Description:
+//      Gets the subtype of the jack that the specified endpoint device
+//      is plugged into.  e.g. if the endpoint is for an array mic, then
+//      we would expect the subtype of the jack to be 
+//      KSNODETYPE_MICROPHONE_ARRAY
+//
+// Return:
+//      S_OK if successful
+//
+///////////////////////////////////////////////////////////////////////////////
+HRESULT GetJackSubtypeForEndpoint(IMMDevice* pEndpoint, GUID* pgSubtype)
+{
+    HRESULT hr = S_OK;
+
+    if (pEndpoint == NULL)
+        return E_POINTER;
+   
+    CComPtr<IDeviceTopology>    spEndpointTopology;
+    CComPtr<IConnector>         spPlug;
+    CComPtr<IConnector>         spJack;
+    CComQIPtr<IPart>            spJackAsPart;
+
+    // Get the Device Topology interface
+    hr = pEndpoint->Activate(__uuidof(IDeviceTopology), CLSCTX_INPROC_SERVER, 
+                            NULL, (void**)&spEndpointTopology);
+    IF_FAILED_JUMP(hr, Exit);
+
+    hr = spEndpointTopology->GetConnector(0, &spPlug);
+    IF_FAILED_JUMP(hr, Exit);
+
+    hr = spPlug->GetConnectedTo(&spJack);
+    IF_FAILED_JUMP(hr, Exit);
+
+    spJackAsPart = spJack;
+
+    hr = spJackAsPart->GetSubType(pgSubtype);
+
+Exit:
+   return hr;
+}//GetJackSubtypeForEndpoint()
+
+
+///////////////////////////////////////////////////////////////////////////////
+// GetInputJack() -- Gets the IPart interface for the input jack on the
+//                   specified device.
+///////////////////////////////////////////////////////////////////////////////
+HRESULT GetInputJack(IMMDevice* pDevice, CComPtr<IPart>& spPart)
+{
+    HRESULT hr = S_OK;
+
+    if (pDevice == NULL)
+        return E_POINTER;
+
+    CComPtr<IDeviceTopology>    spTopology;
+    CComPtr<IConnector>         spPlug;
+    CComPtr<IConnector>         spJack = NULL;
+
+    // Get the Device Topology interface
+    hr = pDevice->Activate(__uuidof(IDeviceTopology), 
+                                  CLSCTX_INPROC_SERVER, NULL,
+                                  reinterpret_cast<void**>(&spTopology));
+    IF_FAILED_RETURN(hr);
+
+    hr = spTopology->GetConnector(0, &spPlug);
+    IF_FAILED_RETURN(hr);
+
+    hr = spPlug->GetConnectedTo(&spJack);
+    IF_FAILED_RETURN(hr);
+
+    // QI for the part
+    spPart = spJack;
+    if (spPart == NULL)
+        return E_NOINTERFACE;
+    
+    return hr;
+}// GetInputJack()
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Function:
+//      GetMicArrayGeometry()
+//
+// Description:
+//      Obtains the geometry for the specified mic array.
+//
+// Parameters:  szDeviceId -- The requested device ID, which can be obtained
+//                            from calling EnumAudioCaptureDevices()
+//              
+//              ppGeometry -- Address of the pointer to the mic-array gemometry.  
+//                            Caller is ressponsible for calling CoTaskMemFree()
+//                            if the call is successfull.
+//
+//              cbSize -- size of the geometry structure
+//
+// Returns:     S_OK on success
+///////////////////////////////////////////////////////////////////////////////
+HRESULT GetMicArrayGeometry(wchar_t szDeviceId[], KSAUDIO_MIC_ARRAY_GEOMETRY** ppGeometry, ULONG& cbSize)
+{
+    HRESULT hr = S_OK;
+
+    if (szDeviceId == NULL)
+        return E_INVALIDARG;
+    if (ppGeometry == NULL)
+        return E_POINTER;
+   
+    cbSize = 0;
+    CComPtr<IMMDeviceEnumerator> spEnumerator;
+    CComPtr<IMMDevice>           spDevice;
+    CComQIPtr<IPart>             spPart;
+    bool bIsMicArray;
+
+    hr = spEnumerator.CoCreateInstance(__uuidof(MMDeviceEnumerator));
+    IF_FAILED_RETURN(hr);
+
+    hr = spEnumerator->GetDevice(szDeviceId, &spDevice);
+    IF_FAILED_RETURN(hr);
+
+    hr = EndpointIsMicArray(spDevice, bIsMicArray);
+    IF_FAILED_RETURN(hr);
+
+    if (!bIsMicArray)
+        return E_FAIL;
+    
+    UINT nPartId = 0;
+    hr = GetInputJack(spDevice, spPart);
+    IF_FAILED_RETURN(hr);
+
+    hr = spPart->GetLocalId(&nPartId);
+    IF_FAILED_RETURN(hr);
+
+    CComPtr<IDeviceTopology>     spTopology;
+    CComPtr<IMMDeviceEnumerator> spEnum;
+    CComPtr<IMMDevice>           spJackDevice;
+    CComPtr<IKsControl>          spKsControl;
+    wchar_t *                    pwstrDevice = 0;
+
+    // Get the topology object for the part
+    hr = spPart->GetTopologyObject(&spTopology);
+    IF_FAILED_RETURN(hr);
+
+    // Get the id of the IMMDevice that this topology object describes.
+    hr = spTopology->GetDeviceId(&pwstrDevice);
+    IF_FAILED_RETURN(hr);
+
+    // Get an IMMDevice pointer using the ID
+    hr = spEnum.CoCreateInstance(__uuidof(MMDeviceEnumerator));
+    IF_FAILED_JUMP(hr, Exit);
+
+    hr = spEnum->GetDevice(pwstrDevice, &spJackDevice);
+    IF_FAILED_JUMP(hr, Exit);
+
+    // Activate IKsControl on the IMMDevice
+    hr = spJackDevice->Activate(__uuidof(IKsControl), CLSCTX_INPROC_SERVER, 
+                               NULL, reinterpret_cast<void**>(&spKsControl));
+    IF_FAILED_JUMP(hr, Exit);
+
+    // At this point we can use IKsControl just as we would use DeviceIoControl
+    KSP_PIN ksp;
+    ULONG   cbData     = 0;
+    ULONG   cbGeometry = 0;
+
+    // Inititialize the pin property
+    ::ZeroMemory(&ksp, sizeof(ksp));
+    ksp.Property.Set     = KSPROPSETID_Audio;
+    ksp.Property.Id      = KSPROPERTY_AUDIO_MIC_ARRAY_GEOMETRY;
+    ksp.Property.Flags   = KSPROPERTY_TYPE_GET;
+    ksp.PinId            = nPartId & PARTID_MASK;  
+
+    // Get data size by passing NULL
+    hr = spKsControl->KsProperty(reinterpret_cast<PKSPROPERTY>(&ksp), 
+                                sizeof(ksp), NULL, 0, &cbGeometry);
+    IF_FAILED_JUMP(hr, Exit);
+   
+    // Allocate memory for the microphone array geometry
+    *ppGeometry = reinterpret_cast<KSAUDIO_MIC_ARRAY_GEOMETRY*>
+                                   (::CoTaskMemAlloc(cbGeometry));
+
+    if(*ppGeometry == 0)
+    {
+        hr = E_OUTOFMEMORY;
+    }
+    IF_FAILED_JUMP(hr, Exit);
+   
+    // Now retriev the mic-array structure...
+    DWORD cbOut = 0;
+    hr = spKsControl->KsProperty(reinterpret_cast<PKSPROPERTY>(&ksp), 
+                                sizeof(ksp), *ppGeometry, cbGeometry,
+                                &cbOut);
+    IF_FAILED_JUMP(hr, Exit);
+    cbSize = cbGeometry;
+   
+Exit:
+    if(pwstrDevice != 0)
+    {
+        ::CoTaskMemFree(pwstrDevice);
+    }
+    return hr;
+}//GetMicArrayGeometry()
+
+

+ 104 - 0
libs/Recorder/AecKsBinder.h

@@ -0,0 +1,104 @@
//-------------------------------------------------------------------------
// File: AecKsBinder.h
// 
// Description: Declarations of the audio device enumeration / binding
//              helpers implemented in AecKsBinder.cpp.
//
// Copyright (c) 2004-2006, Microsoft Corporation. All rights reserved.
//---------------------------------------------------------------------------

#ifndef _AEC_KSBINDER_H_
#define _AEC_KSBINDER_H_

#include <atlbase.h>
#include <ATLComCli.h>
#include <audioclient.h>
#include <MMDeviceApi.h>
#include <AudioEngineEndPoint.h>
#include <DeviceTopology.h>
#include <EndpointVolume.h>

// KS property block used for USB-audio memory get/set requests (see
// USB_AUDIO_PROP_SET_GUID / USBAUDIO_PROPERTY_GETSET_MEM below).
typedef struct 
{
    KSPROPERTY KsProperty;
    BOOLEAN bEndpointFlag;   // presumably selects endpoint vs interface addressing -- TODO confirm
    ULONG ulEntityId;
    union {
         ULONG ulEndpoint;
         ULONG ulInterface;
    };
    ULONG ulOffset;
} USBAUDIO_MEMORY_PROPERTY, *PUSBAUDIO_MEMORY_PROPERTY;

// Property-set GUID and property id for the USB-audio memory access above.
static const GUID USB_AUDIO_PROP_SET_GUID = 
     {0xC3FA16D7, 0x274E, 0x4f2b, 
     {0xA6, 0x3B, 0xD5, 0xE1, 0x09, 0x55, 0xFA, 0x27}};
const DWORD USBAUDIO_PROPERTY_GETSET_MEM = 0;

// Fixed-size description of one audio endpoint, filled in by EnumDevice().
#define MAX_STR_LEN 512
typedef struct
{
    TCHAR szDeviceName[MAX_STR_LEN];    // friendly name (PKEY_Device_FriendlyName)
    TCHAR szDeviceID[MAX_STR_LEN];      // endpoint device ID (IMMDevice::GetId)
    bool bIsMicArrayDevice;             // true if jack subtype is KSNODETYPE_MICROPHONE_ARRAY
} AUDIO_DEVICE_INFO, *PAUDIO_DEVICE_INFO;
    

// Returns the number of active endpoints for the given data-flow direction.
HRESULT GetDeviceNum(EDataFlow eDataFlow, UINT &uDevCount);

__inline HRESULT GetRenderDeviceNum(UINT &uDevCount)
{ return GetDeviceNum(eRender, uDevCount); }

__inline HRESULT GetCaptureDeviceNum(UINT &uDevCount)
{ return GetDeviceNum(eCapture, uDevCount); }


// Fills pDevicInfo (uNumElements entries, sized via GetDeviceNum) with the
// ID, friendly name and mic-array flag of each active endpoint.
HRESULT EnumDevice(
    EDataFlow eDataFlow, 
    UINT  uNumElements,
    AUDIO_DEVICE_INFO *pDevicInfo);

__inline HRESULT EnumRenderDevice(UINT  uNumElements, AUDIO_DEVICE_INFO *pDevicInfo) 
    { return EnumDevice(eRender, uNumElements, pDevicInfo); }

__inline HRESULT EnumCaptureDevice(UINT  uNumElements, AUDIO_DEVICE_INFO *pDevicInfo) 
    { return EnumDevice(eCapture, uNumElements, pDevicInfo); }


// Activates IAudioClient (and optionally IAudioEndpointVolume) on the
// selected endpoint.
HRESULT DeviceBindTo(
        EDataFlow eDataFlow,        // eCapture or eRender
        INT uDevIdx,                // Device index; a negative value selects the default device.
        IAudioClient **ppAudioClient,    // receives the IAudioClient interface
        IAudioEndpointVolume **ppEndpointVolume,
        WCHAR** ppszEndpointDeviceId   // Device ID. Caller frees with CoTaskMemFree if not NULL.
);

__inline HRESULT CaptureDeviceBindTo(
        INT uDevIdx, 
        IAudioClient **ppAudioClient,    // receives the IAudioClient interface
        IAudioEndpointVolume **ppEndpointVolume,
        WCHAR** ppszEndpointDeviceId)
{
    return DeviceBindTo(eCapture, uDevIdx, ppAudioClient, ppEndpointVolume, ppszEndpointDeviceId);
}

__inline HRESULT RenderDeviceBindTo(
        INT uDevIdx, 
        IAudioClient **ppAudioClient,    // receives the IAudioClient interface
        IAudioEndpointVolume **ppEndpointVolume,
        WCHAR** ppszEndpointDeviceId)
{
    return DeviceBindTo(eRender, uDevIdx, ppAudioClient, ppEndpointVolume, ppszEndpointDeviceId);
}

// Sets bIsMicArray if the endpoint with the given device ID is a mic array.
HRESULT DeviceIsMicArray(wchar_t szDeviceId[], bool& bIsMicArray);

// Same check, starting from an already-obtained IMMDevice.
HRESULT EndpointIsMicArray(IMMDevice* pEndpoint, bool& isMicrophoneArray);

// Retrieves the KS subtype GUID of the jack the endpoint is plugged into.
HRESULT GetJackSubtypeForEndpoint(IMMDevice* pEndpoint, GUID* pgSubtype);

// Retrieves the IPart for the device's input jack.
__checkReturn HRESULT GetInputJack(IMMDevice* pDevice, CComPtr<IPart>& spPart);

// Retrieves the KSAUDIO_MIC_ARRAY_GEOMETRY for a mic-array endpoint;
// *ppGeometry is CoTaskMemAlloc'd, caller frees with CoTaskMemFree on success.
HRESULT GetMicArrayGeometry(wchar_t szDeviceId[], KSAUDIO_MIC_ARRAY_GEOMETRY** ppGeometry, ULONG& cbSize);

#endif //_AEC_KSBINDER_H_
+

+ 1005 - 0
libs/Recorder/Recorder.cpp

@@ -0,0 +1,1005 @@
+// Recorder.cpp : Defines the entry point for the console application.
+//
+
+#include <exception>
+#include <iostream>
+
+extern "C" {
+#include "common.h"
+}
+
+
+void capture_screen()
+{
+	try {
+		av_register_all();
+		avdevice_register_all();
+
+		AVFormatContext *pFormatCtx = avformat_alloc_context();
+
+		AVInputFormat *ifmt = av_find_input_format("gdigrab");
+		avformat_open_input(&pFormatCtx, "desktop", ifmt, NULL);
+
+		if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
+			throw std::exception("could not find stream information");
+		}
+
+		int videoIndex = -1;
+		for (int i = 0; i < pFormatCtx->nb_streams; i++) {
+			if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+				videoIndex = i;
+				break;
+			}
+		}
+
+		if (videoIndex == -1)
+			throw std::exception("could not find a video stream");
+
+		AVCodecContext *pDecoderCtx = pFormatCtx->streams[videoIndex]->codec;
+
+		//init decoder
+		AVCodec *pDecoder = avcodec_find_decoder(pDecoderCtx->codec_id);
+
+		if (pDecoder == NULL)
+			throw std::exception("could not find codec");
+
+		if (avcodec_open2(pDecoderCtx, pDecoder, NULL) != 0) {
+			throw std::exception("open codec ffailed");
+		}
+
+		//init transfer and buffer
+		AVFrame *pFrameRGB, *pFrameYUV;
+		pFrameRGB = av_frame_alloc();
+		pFrameYUV = av_frame_alloc();
+
+		uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pDecoderCtx->width, pDecoderCtx->height));
+		avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pDecoderCtx->width, pDecoderCtx->height);
+
+		int ret, gotPic = 0;
+
+		AVPacket *packet = (AVPacket*)av_malloc(sizeof(AVPacket));
+
+		struct SwsContext *imgConvertCtx = NULL;
+		imgConvertCtx = sws_getContext(
+			pDecoderCtx->width,
+			pDecoderCtx->height,
+			pDecoderCtx->pix_fmt,
+			pDecoderCtx->width,
+			pDecoderCtx->height,
+			AV_PIX_FMT_YUV420P,
+			SWS_BICUBIC,
+			NULL, NULL, NULL);
+
+		//init encoder
+		AVCodec *pEncoder = avcodec_find_encoder(AV_CODEC_ID_H264);
+		if (pEncoder == NULL)
+			throw std::exception("could not find encoder");
+
+		AVCodecContext *pEncoderCtx = avcodec_alloc_context3(pEncoder);
+		if (pEncoderCtx == NULL)
+			throw std::exception("could not alloc context for encoder");
+
+		pEncoderCtx->codec_id = AV_CODEC_ID_H264;
+		pEncoderCtx->codec_type = AVMEDIA_TYPE_VIDEO;
+		pEncoderCtx->pix_fmt = AV_PIX_FMT_YUV420P;
+		pEncoderCtx->width = pDecoderCtx->width;
+		pEncoderCtx->height = pDecoderCtx->height;
+		pEncoderCtx->time_base.num = 1;
+		pEncoderCtx->time_base.den = 20;//֡��(��һ���Ӷ�����ͼƬ)
+		pEncoderCtx->bit_rate = 4000000; //������(���������С���Ըı�������Ƶ������)
+		pEncoderCtx->gop_size = 12;
+
+		if (pEncoderCtx->flags & AVFMT_GLOBALHEADER)
+			pEncoderCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
+		AVDictionary *param = 0;
+
+		av_dict_set(&param, "preset", "superfast", 0);
+		av_dict_set(&param, "tune", "zerolatency", 0);
+
+		pEncoder = avcodec_find_encoder(pEncoderCtx->codec_id);
+		if (pEncoder == NULL)
+			throw std::exception("could not find encoder by context");
+
+		if (avcodec_open2(pEncoderCtx, pEncoder, &param) != 0)
+			throw std::exception("open encoder failed");
+
+		AVFrame *pFrame264 = av_frame_alloc();
+		int frame264Size = avpicture_get_size(pEncoderCtx->pix_fmt, pEncoderCtx->width, pEncoderCtx->height);
+		uint8_t *frame264Buf = (uint8_t*)av_malloc(frame264Size);
+		avpicture_fill((AVPicture*)pFrame264, frame264Buf, pEncoderCtx->pix_fmt, pEncoderCtx->width, pEncoderCtx->height);
+
+		AVPacket pkt264;
+		av_new_packet(&pkt264, frame264Size);
+
+		int Y_size = pEncoderCtx->width * pEncoderCtx->height;
+
+		for (;;) {
+			if (av_read_frame(pFormatCtx, packet) < 0) {
+				break;
+			}
+
+			if (packet->stream_index == videoIndex) {
+				ret = avcodec_decode_video2(pDecoderCtx, pFrameRGB, &gotPic, packet);
+				if (ret < 0) {
+					throw std::exception("decode error");
+				}
+
+				//trans rgb to yuv420 in out_buffer
+				if (gotPic) {
+					sws_scale(imgConvertCtx, (const uint8_t* const*)pFrameRGB->data, pFrameRGB->linesize, 0, pDecoderCtx->height, pFrameYUV->data, pFrameYUV->linesize);
+
+					gotPic = 0;
+					pFrame264->data[0] = out_buffer;//Y
+					pFrame264->data[1] = out_buffer + Y_size;//U
+					pFrame264->data[2] = out_buffer + Y_size * 5 / 4;//V
+
+					ret = avcodec_encode_video2(pEncoderCtx, &pkt264, pFrame264, &gotPic);
+					if (gotPic == 1) {
+						printf("264 packet:%d\r\n", pkt264.size);
+
+						static FILE *fp = fopen("a.264", "wb+");
+
+						fwrite(pkt264.data, 1, pkt264.size, fp);
+
+						av_free_packet(&pkt264);
+					}
+				}
+
+				_sleep(50);
+			}
+
+			av_free_packet(packet);
+		}//for(;;��
+
+		av_free(pFrameYUV);
+		avcodec_close(pDecoderCtx);
+		avformat_close_input(&pFormatCtx);
+	}
+	catch (std::exception ex) {
+		printf("%s\r\n", ex.what());
+	}
+}
+
+
+#include <MMDeviceAPI.h>
+#include <AudioClient.h>
+#include <AudioPolicy.h>
+
+#define REFTIMES_PER_SEC  10000000
+#define REFTIMES_PER_MILLISEC  10000
+
+#define EXIT_ON_ERROR(hres)  \
+              if (FAILED(hres)) { goto Exit; }
+#define SAFE_RELEASE(punk)  \
+              if ((punk) != NULL)  \
+                { (punk)->Release(); (punk) = NULL; }
+
+const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
+const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
+const IID IID_IAudioClient = __uuidof(IAudioClient);
+const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);
+
+#define MoveMemory RtlMoveMemory
+#define CopyMemory RtlCopyMemory
+#define FillMemory RtlFillMemory
+#define ZeroMemory RtlZeroMemory
+
+#define min(a,b)            (((a) < (b)) ? (a) : (b))
+
//
//  WAV file writer.
//
//  This is a VERY simple .WAV file writer.
//

//
//  A wave file consists of:
//
//  RIFF header:    8 bytes consisting of the signature "RIFF" followed by a 4 byte file length.
//  WAVE header:    4 bytes consisting of the signature "WAVE".
//  fmt header:     4 bytes consisting of the signature "fmt " followed by a WAVEFORMATEX 
//  WAVEFORMAT:     <n> bytes containing a waveformat structure.
//  DATA header:    8 bytes consisting of the signature "data" followed by a 4 byte file length.
//  wave data:      <m> bytes containing wave data.
//
//
//  Header for a WAV file - we define a structure describing the first few fields in the header for convenience.
//
struct WAVEHEADER
{
	DWORD   dwRiff;                     // "RIFF" tag (copied from the WaveHeader template below)
	DWORD   dwSize;                     // Total file size minus the 8-byte RIFF header
	DWORD   dwWave;                     // "WAVE" tag
	DWORD   dwFmt;                      // "fmt " tag
	DWORD   dwFmtSize;                  // Size of the WAVEFORMATEX that follows
};

//  Static RIFF header template; WriteWaveFile appends the format to it and
//  patches the two zeroed length fields (dwSize, dwFmtSize).
const BYTE WaveHeader[] =
{
	'R',   'I',   'F',   'F',  0x00,  0x00,  0x00,  0x00, 'W',   'A',   'V',   'E',   'f',   'm',   't',   ' ', 0x00, 0x00, 0x00, 0x00
};

//  Static wave DATA chunk tag.
const BYTE WaveData[] = { 'd', 'a', 't', 'a' };
+
+//
+//  Write the contents of a WAV file.  We take as input the data to write and the format of that data.
+//
+bool WriteWaveFile(HANDLE FileHandle, const BYTE *Buffer, const size_t BufferSize, const WAVEFORMATEX *WaveFormat)
+{
+	DWORD waveFileSize = sizeof(WAVEHEADER) + sizeof(WAVEFORMATEX) + WaveFormat->cbSize + sizeof(WaveData) + sizeof(DWORD) + static_cast<DWORD>(BufferSize);
+	BYTE *waveFileData = new (std::nothrow) BYTE[waveFileSize];
+	BYTE *waveFilePointer = waveFileData;
+	WAVEHEADER *waveHeader = reinterpret_cast<WAVEHEADER *>(waveFileData);
+
+	if (waveFileData == NULL)
+	{
+		printf("Unable to allocate %d bytes to hold output wave data\n", waveFileSize);
+		return false;
+	}
+
+	//
+	//  Copy in the wave header - we'll fix up the lengths later.
+	//
+	CopyMemory(waveFilePointer, WaveHeader, sizeof(WaveHeader));
+	waveFilePointer += sizeof(WaveHeader);
+
+	//
+	//  Update the sizes in the header.
+	//
+	waveHeader->dwSize = waveFileSize - (2 * sizeof(DWORD));
+	waveHeader->dwFmtSize = sizeof(WAVEFORMATEX) + WaveFormat->cbSize;
+
+	//
+	//  Next copy in the WaveFormatex structure.
+	//
+	CopyMemory(waveFilePointer, WaveFormat, sizeof(WAVEFORMATEX) + WaveFormat->cbSize);
+	waveFilePointer += sizeof(WAVEFORMATEX) + WaveFormat->cbSize;
+
+
+	//
+	//  Then the data header.
+	//
+	CopyMemory(waveFilePointer, WaveData, sizeof(WaveData));
+	waveFilePointer += sizeof(WaveData);
+	*(reinterpret_cast<DWORD *>(waveFilePointer)) = static_cast<DWORD>(BufferSize);
+	waveFilePointer += sizeof(DWORD);
+
+	//
+	//  And finally copy in the audio data.
+	//
+	CopyMemory(waveFilePointer, Buffer, BufferSize);
+
+	//
+	//  Last but not least, write the data to the file.
+	//
+	DWORD bytesWritten;
+	if (!WriteFile(FileHandle, waveFileData, waveFileSize, &bytesWritten, NULL))
+	{
+		printf("Unable to write wave file: %d\n", GetLastError());
+		delete[]waveFileData;
+		return false;
+	}
+
+	if (bytesWritten != waveFileSize)
+	{
+		printf("Failed to write entire wave file\n");
+		delete[]waveFileData;
+		return false;
+	}
+	delete[]waveFileData;
+	return true;
+}
+
+//
+//  Write the captured wave data to an output file so that it can be examined later.
+//
+void SaveWaveData(BYTE *CaptureBuffer, size_t BufferSize, const WAVEFORMATEX *WaveFormat)
+{
+	HRESULT hr = NOERROR;
+
+	SYSTEMTIME st;
+	GetLocalTime(&st);
+	char waveFileName[_MAX_PATH] = { 0 };
+	sprintf(waveFileName, ".\\WAS_%04d-%02d-%02d_%02d_%02d_%02d_%02d.wav",
+		st.wYear, st.wMonth, st.wDay,
+		st.wHour, st.wMinute, st.wSecond, st.wMilliseconds);
+
+	HANDLE waveHandle = CreateFile(waveFileName, GENERIC_WRITE, FILE_SHARE_READ, NULL, CREATE_ALWAYS,
+		FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN,
+		NULL);
+	if (waveHandle != INVALID_HANDLE_VALUE)
+	{
+		if (WriteWaveFile(waveHandle, CaptureBuffer, BufferSize, WaveFormat))
+		{
+			printf("Successfully wrote WAVE data to %s\n", waveFileName);
+		}
+		else
+		{
+			printf("Unable to write wave file\n");
+		}
+		CloseHandle(waveHandle);
+	}
+	else
+	{
+		printf("Unable to open output WAV file %s: %d\n", waveFileName, GetLastError());
+	}
+
+}
+
+static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt);
+static int select_sample_rate(const AVCodec *codec);
+
+BOOL AdjustFormatTo16Bits(WAVEFORMATEX *pwfx)
+{
+	BOOL bRet(FALSE);
+
+	if (pwfx->wFormatTag == WAVE_FORMAT_IEEE_FLOAT)
+	{
+		pwfx->wFormatTag = WAVE_FORMAT_PCM;
+		pwfx->wBitsPerSample = 16;
+		pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
+		pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
+
+		bRet = TRUE;
+	}
+	else if (pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
+	{
+		PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
+		if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat))
+		{
+			pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+			pEx->Samples.wValidBitsPerSample = 16;
+			pwfx->wBitsPerSample = 16;
+			pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
+			pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
+
+			bRet = TRUE;
+		}
+	}
+
+	return bRet;
+}
+
+#define DEF_CAPTURE_MIC
+
//
//  Demo path: capture the default *render* endpoint through WASAPI loopback,
//  convert the packed-float mix to planar float with libswresample, encode it
//  with the FFmpeg AAC encoder and mux roughly 2000 frames into "tdjm.aac".
//
//  NOTE(review): every early 'return -1' / 'return false' below leaks the COM
//  interfaces, pwfx, both AVFrames and their buffers, and the SwrContext; and
//  'return false' converts to S_OK (success) for an HRESULT return type.
//  NOTE(review): the 'goto Exit' below jumps forward over the initialization
//  of many locals declared after it - ill-formed in standard C++ and skipped
//  by MSVC only with warnings/errors depending on settings.
//
HRESULT capture_audio()
{
	HRESULT hr;
	REFERENCE_TIME hnsRequestedDuration = REFTIMES_PER_SEC;	// ask for a 1 s endpoint buffer
	REFERENCE_TIME hnsActualDuration;
	UINT32 bufferFrameCount;
	UINT32 numFramesAvailable;
	IMMDeviceEnumerator *pEnumerator = NULL;
	IMMDevice *pDevice = NULL;
	IAudioClient *pAudioClient = NULL;
	IAudioCaptureClient *pCaptureClient = NULL;
	WAVEFORMATEX *pwfx = NULL;
	UINT32 packetLength = 0;
	BOOL bDone = FALSE;
	BYTE *pData;
	DWORD flags;

		/*CoTaskMemFree(pwfx);
	SAFE_RELEASE(pEnumerator)
		SAFE_RELEASE(pDevice)
		SAFE_RELEASE(pAudioClient)
		SAFE_RELEASE(pCaptureClient)*/

	// NOTE(review): result of CoInitializeEx is never checked.
	hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);

	hr = CoCreateInstance(
		CLSID_MMDeviceEnumerator, NULL,
		CLSCTX_ALL, IID_IMMDeviceEnumerator,
		(void**)&pEnumerator);
	EXIT_ON_ERROR(hr)

		// eRender + LOOPBACK below: record what is being played, not a mic.
		hr = pEnumerator->GetDefaultAudioEndpoint(
			eRender, eConsole, &pDevice);
	EXIT_ON_ERROR(hr)

		hr = pDevice->Activate(
			IID_IAudioClient, CLSCTX_ALL,
			NULL, (void**)&pAudioClient);
	EXIT_ON_ERROR(hr)

		hr = pAudioClient->GetMixFormat(&pwfx);
	EXIT_ON_ERROR(hr)

		//AdjustFormatTo16Bits(pwfx);

		// Shared-mode, event-driven loopback capture using the raw mix format.
		hr = pAudioClient->Initialize(
			AUDCLNT_SHAREMODE_SHARED,
			AUDCLNT_STREAMFLAGS_LOOPBACK| AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
			hnsRequestedDuration,
			0,
			pwfx,
			NULL);
	EXIT_ON_ERROR(hr)

		// Bytes per frame: one sample per channel.
		size_t persample_size = (pwfx->wBitsPerSample / 8) * pwfx->nChannels;

		// Get the size of the allocated buffer.
		hr = pAudioClient->GetBufferSize(&bufferFrameCount);
	EXIT_ON_ERROR(hr)

		hr = pAudioClient->GetService(
			IID_IAudioCaptureClient,
			(void**)&pCaptureClient);
	EXIT_ON_ERROR(hr)

		// Calculate the actual duration of the allocated buffer.
		// (Computed but never used afterwards.)
		hnsActualDuration = (double)REFTIMES_PER_SEC *
		bufferFrameCount / pwfx->nSamplesPerSec;


	av_register_all();	// deprecated since FFmpeg 4.0

	AVIOContext *output_io_context = NULL;
	AVFormatContext *output_format_context = NULL;
	AVCodecContext *output_codec_context = NULL;
	AVStream *output_stream = NULL;
	AVCodec *output_codec = NULL;

	const char* out_file = "tdjm.aac";

	if (avio_open(&output_io_context, out_file, AVIO_FLAG_READ_WRITE) < 0) {
		printf("Failed to open output file!\n");
		return -1;
	}

	output_format_context = avformat_alloc_context();
	if (output_format_context == NULL) {
		return -1;
	}

	output_format_context->pb = output_io_context;

	// Deduce the muxer from the ".aac" extension.
	output_format_context->oformat = av_guess_format(NULL, out_file, NULL);

	output_format_context->url = av_strdup(out_file);

	if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC)))
		return -1;

	// NOTE(review): both query results are computed but never acted upon.
	int is_support = check_sample_fmt(output_codec, AV_SAMPLE_FMT_S16);
	int selected_sample = select_sample_rate(output_codec);

	output_stream = avformat_new_stream(output_format_context, NULL);
	if (!output_stream)
		return -1;

	output_codec_context = avcodec_alloc_context3(output_codec);
	if (output_codec_context == NULL)
		return -1;

	// Stereo, planar float, device sample rate, 96 kb/s.
	output_codec_context->channels = 2;//av_get_channel_layout_nb_channels(pEncoderCtx->channel_layout);
	output_codec_context->channel_layout = av_get_default_channel_layout(2); //AV_CH_LAYOUT_STEREO;
	output_codec_context->sample_rate = pwfx->nSamplesPerSec;//48000
	output_codec_context->sample_fmt = AV_SAMPLE_FMT_FLTP;
	output_codec_context->bit_rate = 96000;

	// Needed by the native FFmpeg AAC encoder while it was marked experimental.
	output_codec_context->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

	output_codec_context->time_base.den = pwfx->nSamplesPerSec;
	output_codec_context->time_base.num = 1;


	// NOTE(review): this ORs AV_CODEC_FLAG_GLOBAL_HEADER into the *muxer's*
	// oformat->flags; the intended target is output_codec_context->flags.
	if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER) {
		output_format_context->oformat->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	}	

	// NOTE(review): the only throwing exit; everything else returns codes.
	if (avcodec_open2(output_codec_context, output_codec, NULL) < 0)
		throw std::exception("open audio encoder failed");

	if (avcodec_parameters_from_context(output_stream->codecpar, output_codec_context) < 0)
		return -1;

	// Destination frame: planar float at the encoder frame size (1024 for AAC).
	AVFrame *resampled_frame = av_frame_alloc();
	resampled_frame->nb_samples = output_codec_context->frame_size;
	resampled_frame->channel_layout = output_codec_context->channel_layout;
	resampled_frame->format = output_codec_context->sample_fmt;
	resampled_frame->sample_rate = output_codec_context->sample_rate;

	int resampled_size = av_samples_get_buffer_size(NULL, output_codec_context->channels, output_codec_context->frame_size, output_codec_context->sample_fmt, 0);

	uint8_t *resampled_buf = (uint8_t*)av_malloc(resampled_size);
	if (avcodec_fill_audio_frame(resampled_frame, output_codec_context->channels, output_codec_context->sample_fmt, resampled_buf, resampled_size, 0) < 0)
		return -1;

	// Source frame: packed float, as WASAPI delivers the shared mix.
	AVFrame *sample_frame = av_frame_alloc();
	sample_frame->nb_samples = 1024;
	sample_frame->channel_layout = av_get_default_channel_layout(pwfx->nChannels);
	sample_frame->format = AV_SAMPLE_FMT_FLT;
	sample_frame->sample_rate = pwfx->nSamplesPerSec;

	int sample_size = av_samples_get_buffer_size(NULL, pwfx->nChannels, 1024, AV_SAMPLE_FMT_FLT, 0);
	uint8_t *sample_buf = (uint8_t*)av_malloc(sample_size);

	// Links sample_frame->data[0] to sample_buf: filling the staging buffer
	// below fills the frame handed to swr_convert.
	avcodec_fill_audio_frame(sample_frame, pwfx->nChannels, AV_SAMPLE_FMT_FLT, sample_buf, sample_size, 0);

	AVPacket pkt;

	// Auto-reset event signaled by the audio engine once per period.
	HANDLE hAudioSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
	if (hAudioSamplesReadyEvent == NULL)
	{
		printf("Unable to create samples ready event: %d.\n", GetLastError());
		goto Exit;
	}


	hr = pAudioClient->SetEventHandle(hAudioSamplesReadyEvent);
	if (FAILED(hr))
	{
		// NOTE(review): 'return false' == S_OK here, masking the failure.
		printf("Unable to set ready event: %x.\n", hr);
		return false;
	}

	//init resampler
	// NOTE(review): both rates are hard-coded to 48000 even though the device
	// rate is pwfx->nSamplesPerSec; packed FLT stereo -> planar FLTP stereo.
	SwrContext *resample_ctx = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLTP, 48000, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLT, 48000, 0, NULL);
	int error = swr_init(resample_ctx);
	if (error < 0)
		return false;

	//init FIFO buffer

	//start recording

	avformat_write_header(output_format_context, NULL);

	hr = pAudioClient->Start();
	EXIT_ON_ERROR(hr)

		int pcmInBuffer = 0;	// bytes currently staged in sample_buf
	int copiedPcm = 0;	// bytes taken from the current packet so far
	int gotPacket = 0;	// unused
	int index = 0;	// encoded-frame counter; loop stops past 2000

	HANDLE waitArray[3];
	waitArray[0] = hAudioSamplesReadyEvent;

	uint64_t nextptx = 0;	// running PTS in samples

	while (bDone == false)
	{
		DWORD waitResult = WaitForMultipleObjects(1, waitArray, FALSE, INFINITE);
		switch (waitResult)
		{
		case WAIT_OBJECT_0 + 0:     // _AudioSamplesReadyEvent
			hr = pCaptureClient->GetNextPacketSize(&packetLength);

			// Drain every packet currently queued in the shared buffer.
			while (packetLength != 0)
			{
				// Get the available data in the shared buffer.
				hr = pCaptureClient->GetBuffer(
					&pData,
					&numFramesAvailable,
					&flags, NULL, NULL);

				if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
				{
					pData = NULL;  // Tell CopyData to write silence.
				}

				// Copy the available capture data to the audio sink.
				// NOTE(review): silent packets (pData == NULL) are dropped
				// entirely rather than encoded as silence.

				if (pData != NULL) {

					// Top up the staging buffer with as much of this packet as fits.
					copiedPcm = min(sample_size - pcmInBuffer, numFramesAvailable*persample_size);
					if (copiedPcm > 0) {
						memcpy(sample_buf + pcmInBuffer, pData, copiedPcm);
						pcmInBuffer += copiedPcm;
					}

					if (pcmInBuffer == sample_size) {//got one frame ,encode

						// Packed FLT -> planar FLTP; same rate, so 1024 in, 1024 out.
						int ret = swr_convert(resample_ctx, resampled_frame->data, output_codec_context->frame_size, (const uint8_t**)sample_frame->data, 1024);
						if (ret <= 0)
							return -1;

						av_init_packet(&pkt);
						resampled_frame->pts = nextptx;
						nextptx += resampled_frame->nb_samples;

						int error = avcodec_send_frame(output_codec_context, resampled_frame);

						if (error == 0) {
							// NOTE(review): only one receive per send; an
							// encoder may buffer or emit multiple packets.
							error = avcodec_receive_packet(output_codec_context, &pkt);
							if (error == 0) {
								
								av_write_frame(output_format_context, &pkt);

								index++;

								printf("index:%d\r\n", index);
							}
							av_packet_unref(&pkt);
						}
						pcmInBuffer = 0;
					}

					// Stash the remainder of the packet for the next frame.
					if (numFramesAvailable*persample_size - copiedPcm > 0)
					{
						memcpy(sample_buf + pcmInBuffer, pData + copiedPcm, numFramesAvailable*persample_size - copiedPcm);

						pcmInBuffer += numFramesAvailable*persample_size - copiedPcm;
					}

					if (index > 2000) {
						printf("pcm still in buffer:%d\r\n", pcmInBuffer);
						bDone = true;
						break;
					}
				}

				hr = pCaptureClient->ReleaseBuffer(numFramesAvailable);
				EXIT_ON_ERROR(hr)

					hr = pCaptureClient->GetNextPacketSize(&packetLength);
				EXIT_ON_ERROR(hr)
			}

			break;
		} // end of 'switch (waitResult)'

	} // end of 'while (stillPlaying)'


		// NOTE(review): the encoder is never flushed (send_frame(NULL)), so
		// buffered frames are lost before the trailer is written.
		av_write_trailer(output_format_context);

		if (output_codec_context)
			avcodec_free_context(&output_codec_context);
		if (output_format_context) {
			avio_closep(&output_format_context->pb);
			avformat_free_context(output_format_context);
		}

	hr = pAudioClient->Stop();  // Stop recording.
	EXIT_ON_ERROR(hr)

		Exit:
	CoTaskMemFree(pwfx);
	SAFE_RELEASE(pEnumerator)
		SAFE_RELEASE(pDevice)
		SAFE_RELEASE(pAudioClient)
		SAFE_RELEASE(pCaptureClient)

		return hr;

}
+
+HRESULT cpature_wave()
+{
+	HRESULT hr;
+
+	IMMDeviceEnumerator *pEnumerator = NULL;
+	IMMDevice           *pDevice = NULL;
+	IAudioClient        *pAudioClient = NULL;
+	IAudioCaptureClient *pCaptureClient = NULL;
+	WAVEFORMATEX        *pwfx = NULL;
+
+	REFERENCE_TIME hnsRequestedDuration = REFTIMES_PER_SEC;
+	UINT32         bufferFrameCount;
+	UINT32         numFramesAvailable;
+
+	BYTE           *pData;
+	UINT32         packetLength = 0;
+	DWORD          flags;
+
+	hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
+	if (FAILED(hr))
+	{
+		printf("Unable to initialize COM in thread: %x\n", hr);
+		return hr;
+	}
+
+	hr = CoCreateInstance(CLSID_MMDeviceEnumerator,
+		NULL,
+		CLSCTX_ALL,
+		IID_IMMDeviceEnumerator,
+		(void**)&pEnumerator);
+	EXIT_ON_ERROR(hr)
+
+#ifdef DEF_CAPTURE_MIC
+		hr = pEnumerator->GetDefaultAudioEndpoint(eCapture, eCommunications, &pDevice);
+	//hr = pEnumerator->GetDefaultAudioEndpoint(eCapture,  eMultimedia, &pDevice);
+#else 
+		hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDevice);
+#endif	
+
+	EXIT_ON_ERROR(hr)
+
+		hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient);
+	EXIT_ON_ERROR(hr)
+
+		hr = pAudioClient->GetMixFormat(&pwfx);
+	EXIT_ON_ERROR(hr)
+
+		AdjustFormatTo16Bits(pwfx);
+
+
+
+#ifdef DEF_CAPTURE_MIC
+	hr = pAudioClient->Initialize(
+		AUDCLNT_SHAREMODE_SHARED,
+		AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST,
+		hnsRequestedDuration,
+		0,
+		pwfx,
+		NULL);
+#else
+	/*
+	The AUDCLNT_STREAMFLAGS_LOOPBACK flag enables loopback recording.
+	In loopback recording, the audio engine copies the audio stream
+	that is being played by a rendering endpoint device into an audio endpoint buffer
+	so that a WASAPI client can capture the stream.
+	If this flag is set, the IAudioClient::Initialize method attempts to open a capture buffer on the rendering device.
+	This flag is valid only for a rendering device
+	and only if the Initialize call sets the ShareMode parameter to AUDCLNT_SHAREMODE_SHARED.
+	Otherwise the Initialize call will fail.
+	If the call succeeds,
+	the client can call the IAudioClient::GetService method
+	to obtain an IAudioCaptureClient interface on the rendering device.
+	For more information, see Loopback Recording.
+	*/
+	hr = pAudioClient->Initialize(
+		AUDCLNT_SHAREMODE_SHARED,
+		AUDCLNT_STREAMFLAGS_EVENTCALLBACK|
+		AUDCLNT_STREAMFLAGS_LOOPBACK,
+		hnsRequestedDuration,
+		0,
+		pwfx,
+		NULL);
+#endif
+	EXIT_ON_ERROR(hr)
+
+		int nFrameSize = (pwfx->wBitsPerSample / 8) * pwfx->nChannels;
+
+		REFERENCE_TIME hnsStreamLatency;
+	hr = pAudioClient->GetStreamLatency(&hnsStreamLatency);
+	EXIT_ON_ERROR(hr)
+
+
+		REFERENCE_TIME hnsDefaultDevicePeriod;
+	REFERENCE_TIME hnsMinimumDevicePeriod;
+	hr = pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, &hnsMinimumDevicePeriod);
+	EXIT_ON_ERROR(hr)
+
+
+		hr = pAudioClient->GetBufferSize(&bufferFrameCount);
+	EXIT_ON_ERROR(hr)
+		std::cout << std::endl << "GetBufferSize        : " << bufferFrameCount << std::endl;
+
+	// SetEventHandle
+	//////////////////////////////////////////////////////////////////////////
+	HANDLE hAudioSamplesReadyEvent = CreateEvent(NULL,FALSE,FALSE,NULL);
+	if (hAudioSamplesReadyEvent == NULL)
+	{
+		printf("Unable to create samples ready event: %d.\n", GetLastError());
+		goto Exit;
+	}
+
+
+	hr = pAudioClient->SetEventHandle(hAudioSamplesReadyEvent);
+	if (FAILED(hr))
+	{
+		printf("Unable to set ready event: %x.\n", hr);
+		return false;
+	}
+	//////////////////////////////////////////////////////////////////////////
+
+	hr = pAudioClient->GetService(IID_IAudioCaptureClient, (void**)&pCaptureClient);
+
+	EXIT_ON_ERROR(hr)
+
+		hr = pAudioClient->Start();  // Start recording.
+	EXIT_ON_ERROR(hr)
+
+		printf("\nAudio Capture begin...\n\n");
+
+	int  nCnt = 0;
+
+	size_t nCaptureBufferSize = 8 * 1024 * 1024;
+	size_t nCurrentCaptureIndex = 0;
+
+	BYTE *pbyCaptureBuffer = new (std::nothrow) BYTE[nCaptureBufferSize];
+
+	HANDLE waitArray[3];
+	waitArray[0] = hAudioSamplesReadyEvent;
+
+	bool stillPlaying = true;
+
+	// Each loop fills about half of the shared buffer.
+	while (stillPlaying)
+	{
+		DWORD waitResult = WaitForMultipleObjects(1, waitArray, FALSE, INFINITE);
+		switch (waitResult)
+		{
+		case WAIT_OBJECT_0 + 0:     // _AudioSamplesReadyEvent
+			hr = pCaptureClient->GetNextPacketSize(&packetLength);
+			EXIT_ON_ERROR(hr)
+
+				printf("%06d # _AudioSamplesReadyEvent packetLength:%06u \n", nCnt, packetLength);
+
+			while (packetLength != 0)
+			{
+				// Get the available data in the shared buffer.
+				hr = pCaptureClient->GetBuffer(&pData,
+					&numFramesAvailable,
+					&flags, NULL, NULL);
+				EXIT_ON_ERROR(hr)
+
+
+					nCnt++;
+
+				// test flags
+				//////////////////////////////////////////////////////////////////////////
+				if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
+				{
+					printf("AUDCLNT_BUFFERFLAGS_SILENT \n");
+				}
+
+				if (flags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY)
+				{
+					printf("%06d # AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY \n", nCnt);
+				}
+				//////////////////////////////////////////////////////////////////////////
+
+				UINT32 framesToCopy = min(numFramesAvailable, static_cast<UINT32>((nCaptureBufferSize - nCurrentCaptureIndex) / nFrameSize));
+				if (framesToCopy != 0)
+				{
+					//
+					//  The flags on capture tell us information about the data.
+					//
+					//  We only really care about the silent flag since we want to put frames of silence into the buffer
+					//  when we receive silence.  We rely on the fact that a logical bit 0 is silence for both float and int formats.
+					//
+					if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
+					{
+						//
+						//  Fill 0s from the capture buffer to the output buffer.
+						//
+						ZeroMemory(&pbyCaptureBuffer[nCurrentCaptureIndex], framesToCopy*nFrameSize);
+					}
+					else
+					{
+						//
+						//  Copy data from the audio engine buffer to the output buffer.
+						//
+						CopyMemory(&pbyCaptureBuffer[nCurrentCaptureIndex], pData, framesToCopy*nFrameSize);
+
+						printf("Get: %d\r\n", framesToCopy*nFrameSize);
+					}
+					//
+					//  Bump the capture buffer pointer.
+					//
+					nCurrentCaptureIndex += framesToCopy*nFrameSize;
+				}
+
+				hr = pCaptureClient->ReleaseBuffer(numFramesAvailable);
+				EXIT_ON_ERROR(hr)
+
+					hr = pCaptureClient->GetNextPacketSize(&packetLength);
+				EXIT_ON_ERROR(hr)
+
+					UINT32 ui32NumPaddingFrames;
+				hr = pAudioClient->GetCurrentPadding(&ui32NumPaddingFrames);
+				EXIT_ON_ERROR(hr)
+					if (0 != ui32NumPaddingFrames)
+					{
+						printf("GetCurrentPadding : %6u\n", ui32NumPaddingFrames);
+					}
+				//////////////////////////////////////////////////////////////////////////
+
+				if (nCnt == 1000)
+				{
+					stillPlaying = false;
+					break;
+				}
+
+			} // end of 'while (packetLength != 0)'
+
+			break;
+		} // end of 'switch (waitResult)'
+
+	} // end of 'while (stillPlaying)'
+
+	  //
+	  //  We've now captured our wave data.  Now write it out in a wave file.
+	  //
+	SaveWaveData(pbyCaptureBuffer, nCurrentCaptureIndex, pwfx);
+
+	printf("\nAudio Capture Done.\n");
+
+	hr = pAudioClient->Stop();  // Stop recording.
+	EXIT_ON_ERROR(hr)
+
+		Exit:
+	CoTaskMemFree(pwfx);
+	SAFE_RELEASE(pEnumerator)
+		SAFE_RELEASE(pDevice)
+		SAFE_RELEASE(pAudioClient)
+		SAFE_RELEASE(pCaptureClient)
+
+		CoUninitialize();
+
+	if (pbyCaptureBuffer)
+	{
+		delete[] pbyCaptureBuffer;
+		pbyCaptureBuffer = NULL;
+	}
+
+	if (hAudioSamplesReadyEvent)
+	{
+		CloseHandle(hAudioSamplesReadyEvent);
+		hAudioSamplesReadyEvent = NULL;
+	}
+}
+
+/* check that a given sample format is supported by the encoder */
+static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
+{
+	const enum AVSampleFormat *p = codec->sample_fmts;
+
+	while (*p != AV_SAMPLE_FMT_NONE) {
+		if (*p == sample_fmt)
+			return 1;
+		p++;
+	}
+	return 0;
+}
+
+/* just pick the highest supported samplerate */
+static int select_sample_rate(const AVCodec *codec)
+{
+	const int *p;
+	int best_samplerate = 0;
+
+	if (!codec->supported_samplerates)
+		return 44100;
+
+	p = codec->supported_samplerates;
+	while (*p) {
+		if (!best_samplerate || abs(44100 - *p) < abs(44100 - best_samplerate))
+			best_samplerate = *p;
+		p++;
+	}
+	return best_samplerate;
+}
+
/* select layout with the highest channel count */
/* NOTE(review): channel layouts are 64-bit masks, but the return type is
 * 'int', so best_ch_layout is silently truncated for layouts that do not
 * fit in 31 bits. */
static int select_channel_layout(const AVCodec *codec)
{
	const uint64_t *p;
	uint64_t best_ch_layout = 0;	// layout mask of the best candidate so far
	int best_nb_channels = 0;	// its channel count

	// No published list: default to plain stereo.
	if (!codec->channel_layouts)
		return AV_CH_LAYOUT_STEREO;

	p = codec->channel_layouts;	// zero-terminated list of layout masks
	while (*p) {
		int nb_channels = av_get_channel_layout_nb_channels(*p);

		if (nb_channels > best_nb_channels) {
			best_ch_layout = *p;
			best_nb_channels = nb_channels;
		}
		p++;
	}
	return best_ch_layout;	// implicit uint64_t -> int narrowing (see NOTE)
}
+
// Manual test entry point: runs the WAV capture demo (cpature_wave) and then
// waits for a keypress before exiting.  The alternate demos are left
// commented out for easy switching.
int main1()
{
	//capture_audio();
	//test_transcode();
	cpature_wave();

	system("pause");

    return 0;
}
+

+ 423 - 0
libs/Recorder/Recorder.vcxproj

@@ -0,0 +1,423 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug_DLL|Win32">
+      <Configuration>Debug_DLL</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug_Static|Win32">
+      <Configuration>Debug_Static</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|Win32">
+      <Configuration>Debug</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release_DLL|Win32">
+      <Configuration>Release_DLL</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release_Static|Win32">
+      <Configuration>Release_Static</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|Win32">
+      <Configuration>Release</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>{7A53119E-1803-42CD-BC2F-CF60B09A42E6}</ProjectGuid>
+    <Keyword>Win32Proj</Keyword>
+    <RootNamespace>Recorder</RootNamespace>
+    <WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
+    <ProjectName>Recorder</ProjectName>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v143</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'" Label="Configuration">
+    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v143</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'" Label="Configuration">
+    <ConfigurationType>DynamicLibrary</ConfigurationType>
+    <UseDebugLibraries>true</UseDebugLibraries>
+    <PlatformToolset>v143</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v143</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'" Label="Configuration">
+    <ConfigurationType>StaticLibrary</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v143</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'" Label="Configuration">
+    <ConfigurationType>DynamicLibrary</ConfigurationType>
+    <UseDebugLibraries>false</UseDebugLibraries>
+    <PlatformToolset>v143</PlatformToolset>
+    <WholeProgramOptimization>true</WholeProgramOptimization>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Label="Shared">
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <LinkIncremental>true</LinkIncremental>
+    <OutDir>$(SolutionDir)Bin\$(PlatformShortName)\$(Configuration)\</OutDir>
+    <TargetName>recorder</TargetName>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'">
+    <LinkIncremental>true</LinkIncremental>
+    <OutDir>$(SolutionDir)Bin\$(PlatformShortName)\$(Configuration)\</OutDir>
+    <TargetName>recorder</TargetName>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'">
+    <LinkIncremental>true</LinkIncremental>
+    <OutDir>$(SolutionDir)Bin\$(PlatformShortName)\$(Configuration)\</OutDir>
+    <TargetName>recorder</TargetName>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <LinkIncremental>false</LinkIncremental>
+    <OutDir>$(SolutionDir)Bin\$(PlatformShortName)\$(Configuration)\</OutDir>
+    <TargetName>recorder</TargetName>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'">
+    <LinkIncremental>false</LinkIncremental>
+    <OutDir>$(SolutionDir)Bin\$(PlatformShortName)\$(Configuration)\</OutDir>
+    <TargetName>recorder</TargetName>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'">
+    <LinkIncremental>false</LinkIncremental>
+    <OutDir>$(SolutionDir)Bin\$(PlatformShortName)\$(Configuration)\</OutDir>
+    <TargetName>recorder</TargetName>
+  </PropertyGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <ClCompile>
+      <PrecompiledHeader>NotUsing</PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>$(SolutionDir)ffmpeg\include\;$(SolutionDir)libmp4v2\include\;$(SolutionDir)portaudio\include\;$(OutDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalLibraryDirectories>$(SolutionDir)ffmpeg\lib\;$(SolutionDir)libmp4v2\lib\;$(SolutionDir)portaudio\lib\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>avformat.lib;avutil.lib;avdevice.lib;avcodec.lib;swscale.lib;swresample.lib;postproc.lib;avfilter.lib;Advapi32.lib;Ole32.lib;portaudio_x86.lib;user32.lib;gdi32.lib;dbghelp.lib</AdditionalDependencies>
+    </Link>
+    <PostBuildEvent>
+      <Command>xcopy /r /y $(SolutionDir)ffmpeg\bin $(TargetDir)
+xcopy /r /y $(SolutionDir)libmp4v2\bin\$(Configuration) $(TargetDir)
+xcopy /r /y $(SolutionDir)portaudio\bin $(TargetDir)</Command>
+    </PostBuildEvent>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'">
+    <ClCompile>
+      <PrecompiledHeader>NotUsing</PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>$(SolutionDir)ffmpeg\include\;$(SolutionDir)libmp4v2\include\;$(SolutionDir)portaudio\include\;$(OutDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalLibraryDirectories>$(SolutionDir)ffmpeg\lib\;$(SolutionDir)libmp4v2\lib\;$(SolutionDir)portaudio\lib\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>avformat.lib;avutil.lib;avdevice.lib;avcodec.lib;swscale.lib;swresample.lib;postproc.lib;avfilter.lib;Advapi32.lib;Ole32.lib;portaudio_x86.lib;libmp4v2D.lib</AdditionalDependencies>
+    </Link>
+    <PostBuildEvent>
+      <Command>xcopy /r /y $(SolutionDir)ffmpeg\bin $(TargetDir)
+xcopy /r /y $(SolutionDir)libmp4v2\bin\$(Configuration) $(TargetDir)
+xcopy /r /y $(SolutionDir)portaudio\bin $(TargetDir)</Command>
+    </PostBuildEvent>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'">
+    <ClCompile>
+      <PrecompiledHeader>NotUsing</PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>$(SolutionDir)ffmpeg\include\;$(SolutionDir)libmp4v2\include\;$(SolutionDir)portaudio\include\;$(OutDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalLibraryDirectories>$(SolutionDir)ffmpeg\lib\;$(SolutionDir)libmp4v2\lib\;$(SolutionDir)portaudio\lib\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>avformat.lib;avutil.lib;avdevice.lib;avcodec.lib;swscale.lib;swresample.lib;postproc.lib;avfilter.lib;Advapi32.lib;Ole32.lib;portaudio_x86.lib;user32.lib;gdi32.lib;dbghelp.lib</AdditionalDependencies>
+    </Link>
+    <PostBuildEvent>
+      <Command>xcopy /r /y $(SolutionDir)ffmpeg\bin $(TargetDir)
+xcopy /r /y $(SolutionDir)portaudio\bin $(TargetDir)</Command>
+    </PostBuildEvent>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>NotUsing</PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>$(SolutionDir)ffmpeg\include\;$(SolutionDir)libmp4v2\include\;$(SolutionDir)portaudio\include\;$(OutDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalLibraryDirectories>$(SolutionDir)ffmpeg\lib\;$(SolutionDir)libmp4v2\lib\;$(SolutionDir)portaudio\lib\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>avformat.lib;avutil.lib;avdevice.lib;avcodec.lib;swscale.lib;swresample.lib;postproc.lib;avfilter.lib;Advapi32.lib;Ole32.lib;portaudio_x86.lib;user32.lib;gdi32.lib;dbghelp.lib</AdditionalDependencies>
+      <AdditionalOptions>/SAFESEH:NO %(AdditionalOptions)</AdditionalOptions>
+    </Link>
+    <PostBuildEvent>
+      <Command>xcopy /r /y $(SolutionDir)ffmpeg\bin $(TargetDir)
+xcopy /r /y $(SolutionDir)libmp4v2\bin\$(Configuration) $(TargetDir)
+xcopy /r /y $(SolutionDir)portaudio\bin $(TargetDir)</Command>
+    </PostBuildEvent>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>NotUsing</PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>$(SolutionDir)ffmpeg\include\;$(SolutionDir)libmp4v2\include\;$(SolutionDir)portaudio\include\;$(OutDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalLibraryDirectories>$(SolutionDir)ffmpeg\lib\;$(SolutionDir)libmp4v2\lib\;$(SolutionDir)portaudio\lib\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>avformat.lib;avutil.lib;avdevice.lib;avcodec.lib;swscale.lib;swresample.lib;postproc.lib;avfilter.lib;Advapi32.lib;Ole32.lib;portaudio_x86.lib;libmp4v2.lib</AdditionalDependencies>
+      <AdditionalOptions>/SAFESEH:NO %(AdditionalOptions)</AdditionalOptions>
+    </Link>
+    <PostBuildEvent>
+      <Command>xcopy /r /y $(SolutionDir)ffmpeg\bin $(TargetDir)
+xcopy /r /y $(SolutionDir)libmp4v2\bin\$(Configuration) $(TargetDir)
+xcopy /r /y $(SolutionDir)portaudio\bin $(TargetDir)</Command>
+    </PostBuildEvent>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'">
+    <ClCompile>
+      <WarningLevel>Level3</WarningLevel>
+      <PrecompiledHeader>NotUsing</PrecompiledHeader>
+      <Optimization>MaxSpeed</Optimization>
+      <FunctionLevelLinking>true</FunctionLevelLinking>
+      <IntrinsicFunctions>true</IntrinsicFunctions>
+      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <AdditionalIncludeDirectories>$(SolutionDir)ffmpeg\include\;$(SolutionDir)libmp4v2\include\;$(SolutionDir)portaudio\include\;$(OutDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <SubSystem>Console</SubSystem>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <OptimizeReferences>true</OptimizeReferences>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <AdditionalLibraryDirectories>$(SolutionDir)ffmpeg\lib\;$(SolutionDir)libmp4v2\lib\;$(SolutionDir)portaudio\lib\;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+      <AdditionalDependencies>avformat.lib;avutil.lib;avdevice.lib;avcodec.lib;swscale.lib;swresample.lib;postproc.lib;avfilter.lib;Advapi32.lib;Ole32.lib;portaudio_x86.lib;user32.lib;gdi32.lib;dbghelp.lib</AdditionalDependencies>
+      <AdditionalOptions>/SAFESEH:NO %(AdditionalOptions)</AdditionalOptions>
+    </Link>
+    <PostBuildEvent>
+      <Command>echo f| xcopy /r /y $(SolutionDir)ffmpeg\bin $(TargetDir)
+echo f| xcopy /r /y $(SolutionDir)portaudio\bin $(TargetDir)
+echo f| xcopy /r /y $(ProjectDir)export.h $(SolutionDir)2node\platform\win32\export.h
+echo f| xcopy /r /y $(TargetDir)$(TargetName).dll $(SolutionDir)2node\platform\win32\$(TargetName).dll
+echo f| xcopy /r /y $(TargetDir)$(TargetName).lib $(SolutionDir)2node\platform\win32\$(TargetName).lib</Command>
+    </PostBuildEvent>
+  </ItemDefinitionGroup>
+  <ItemGroup>
+    <ClInclude Include="d3d_helper.h" />
+    <ClInclude Include="device_videos.h" />
+    <ClInclude Include="encoder_video_x264.h" />
+    <ClInclude Include="encoder_aac.h" />
+    <ClInclude Include="encoder_video_nvenc.h" />
+    <ClInclude Include="encoder_video.h" />
+    <ClInclude Include="encoder_video_define.h" />
+    <ClInclude Include="encoder_video_factory.h" />
+    <ClInclude Include="error_define.h" />
+    <ClInclude Include="filter.h" />
+    <ClInclude Include="filter_aresample.h" />
+    <ClInclude Include="filter_amix.h" />
+    <ClInclude Include="hardware_acceleration.h" />
+    <ClInclude Include="headers_ffmpeg.h" />
+    <ClInclude Include="log_helper.h" />
+    <ClInclude Include="headers_mmdevice.h" />
+    <ClInclude Include="mul_db.h" />
+    <ClInclude Include="muxer_define.h" />
+    <ClInclude Include="muxer_file.h" />
+    <ClInclude Include="muxer_ffmpeg.h" />
+    <ClInclude Include="record_desktop_mag.h" />
+    <ClInclude Include="record_desktop_wgc.h" />
+    <ClInclude Include="record_audio.h" />
+    <ClInclude Include="record_audio_define.h" />
+    <ClInclude Include="record_audio_factory.h" />
+    <ClInclude Include="record_audio_dshow.h" />
+    <ClInclude Include="record_audio_wasapi.h" />
+    <ClInclude Include="record_desktop.h" />
+    <ClInclude Include="record_desktop_define.h" />
+    <ClInclude Include="record_desktop_duplication.h" />
+    <ClInclude Include="record_desktop_ffmpeg_dshow.h" />
+    <ClInclude Include="record_desktop_factory.h" />
+    <ClInclude Include="record_desktop_ffmpeg_gdi.h" />
+    <ClInclude Include="device_audios.h" />
+    <ClInclude Include="export.h" />
+    <ClInclude Include="record_desktop_gdi.h" />
+    <ClInclude Include="remuxer_ffmpeg.h" />
+    <ClInclude Include="resample_pcm.h" />
+    <ClInclude Include="ring_buffer.h" />
+    <ClInclude Include="sws_helper.h" />
+    <ClInclude Include="system_error.h" />
+    <ClInclude Include="system_lib.h" />
+    <ClInclude Include="system_time.h" />
+    <ClInclude Include="utils_string.h" />
+    <ClInclude Include="system_version.h" />
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="d3d_helper.cpp" />
+    <ClCompile Include="device_videos.cpp" />
+    <ClCompile Include="dllmain.cpp" />
+    <ClCompile Include="encoder_video_x264.cpp" />
+    <ClCompile Include="encoder_aac.cpp" />
+    <ClCompile Include="encoder_video_nvenc.cpp" />
+    <ClCompile Include="encoder_video.cpp" />
+    <ClCompile Include="encoder_video_factory.cpp" />
+    <ClCompile Include="filter.cpp" />
+    <ClCompile Include="filter_aresample.cpp" />
+    <ClCompile Include="filter_amix.cpp" />
+    <ClCompile Include="hardware_acceleration.cpp" />
+    <ClCompile Include="log_helper.cpp" />
+    <ClCompile Include="main.cpp" />
+    <ClCompile Include="headers_mmdevice.cpp" />
+    <ClCompile Include="muxer_file.cpp" />
+    <ClCompile Include="muxer_ffmpeg.cpp" />
+    <ClCompile Include="record_desktop_mag.cpp" />
+    <ClCompile Include="record_desktop_wgc.cpp" />
+    <ClCompile Include="record_audio.cpp" />
+    <ClCompile Include="record_audio_factory.cpp" />
+    <ClCompile Include="record_audio_dshow.cpp" />
+    <ClCompile Include="record_audio_wasapi.cpp" />
+    <ClCompile Include="record_desktop.cpp" />
+    <ClCompile Include="record_desktop_duplication.cpp" />
+    <ClCompile Include="record_desktop_ffmpeg_dshow.cpp" />
+    <ClCompile Include="record_desktop_ffmpeg_gdi.cpp" />
+    <ClCompile Include="record_desktop_factory.cpp" />
+    <ClCompile Include="device_audios.cpp" />
+    <ClCompile Include="export.cpp" />
+    <ClCompile Include="record_desktop_gdi.cpp" />
+    <ClCompile Include="remuxer_ffmpeg.cpp" />
+    <ClCompile Include="resample_pcm.cpp" />
+    <ClCompile Include="sws_helper.cpp" />
+    <ClCompile Include="system_error.cpp" />
+    <ClCompile Include="system_lib.cpp" />
+    <ClCompile Include="system_time.cpp" />
+    <ClCompile Include="utils_string.cpp" />
+    <ClCompile Include="system_version.cpp" />
+  </ItemGroup>
+  <ItemGroup>
+    <FxCompile Include="d3d_pixelshader.hlsl">
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+      </ObjectFileOutput>
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'">
+      </ObjectFileOutput>
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+      </ObjectFileOutput>
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'">
+      </ObjectFileOutput>
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'">
+      </ObjectFileOutput>
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'">
+      </ObjectFileOutput>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">PS</EntryPointName>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'">PS</EntryPointName>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">PS</EntryPointName>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'">PS</EntryPointName>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'">PS</EntryPointName>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'">PS</EntryPointName>
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Pixel</ShaderType>
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'">Pixel</ShaderType>
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Pixel</ShaderType>
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'">Pixel</ShaderType>
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'">Pixel</ShaderType>
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'">Pixel</ShaderType>
+    </FxCompile>
+    <FxCompile Include="d3d_vertexshader.hlsl">
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Vertex</ShaderType>
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'">Vertex</ShaderType>
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Vertex</ShaderType>
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'">Vertex</ShaderType>
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'">Vertex</ShaderType>
+      <ShaderType Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'">Vertex</ShaderType>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">VS</EntryPointName>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'">VS</EntryPointName>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">VS</EntryPointName>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'">VS</EntryPointName>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'">VS</EntryPointName>
+      <EntryPointName Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'">VS</EntryPointName>
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+      </ObjectFileOutput>
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Release_Static|Win32'">
+      </ObjectFileOutput>
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+      </ObjectFileOutput>
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Release_DLL|Win32'">
+      </ObjectFileOutput>
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug_Static|Win32'">
+      </ObjectFileOutput>
+      <HeaderFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'">$(OutDir)%(Filename).h</HeaderFileOutput>
+      <ObjectFileOutput Condition="'$(Configuration)|$(Platform)'=='Debug_DLL|Win32'">
+      </ObjectFileOutput>
+    </FxCompile>
+  </ItemGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>

+ 344 - 0
libs/Recorder/Recorder.vcxproj.filters

@@ -0,0 +1,344 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup>
+    <Filter Include="Source Files">
+      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+    </Filter>
+    <Filter Include="Header Files">
+      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
+    </Filter>
+    <Filter Include="Resource Files">
+      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
+      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
+    </Filter>
+    <Filter Include="Header Files\muxer">
+      <UniqueIdentifier>{c728c158-0d9c-4769-bd5e-52b2744f7521}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Source Files\muxer">
+      <UniqueIdentifier>{d50f186f-c489-4503-be78-0fbc32d85858}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\encoder">
+      <UniqueIdentifier>{3785b6df-1472-4dee-933f-5ff6c140b16e}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Source Files\encoder">
+      <UniqueIdentifier>{017653cc-c318-4960-8099-8dc6a3ca5407}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\record">
+      <UniqueIdentifier>{06d19726-b0cf-49cb-9562-ecb1d4525c50}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Source Files\record">
+      <UniqueIdentifier>{5e348938-a8f3-473f-9004-0a584fb52803}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\devices">
+      <UniqueIdentifier>{abffc59a-129a-468d-b433-0278780530de}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Source Files\devices">
+      <UniqueIdentifier>{e67755db-f19e-439c-839b-30e3c6c1ca61}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Source Files\filters">
+      <UniqueIdentifier>{495cf6d1-c879-4172-900c-e77a3ca74343}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\filters">
+      <UniqueIdentifier>{7b3e2bcb-f9aa-47e1-99f6-fd69cef6f46b}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\helpers">
+      <UniqueIdentifier>{4ebe5c56-1ded-4999-8ec8-a412f7e7e7c8}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Source Files\helpers">
+      <UniqueIdentifier>{4a9d1e55-d315-4d38-830e-30283889a177}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\logger">
+      <UniqueIdentifier>{36132240-93e0-4286-8729-4ab2df2ff726}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\defines">
+      <UniqueIdentifier>{6efa3ce5-a902-4756-866e-0b71d7a64e2d}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\helpers\win">
+      <UniqueIdentifier>{524dd0a6-01e1-4df1-99a4-6453e72b2f00}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Source Files\helpers\win">
+      <UniqueIdentifier>{86ee3356-e233-46ea-a2cf-9250808ac3f3}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Source Files\encoder\video">
+      <UniqueIdentifier>{8f473355-4af0-422e-8cea-ea39e4b0b3a9}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Source Files\encoder\audio">
+      <UniqueIdentifier>{7313da4d-6f7a-4e00-a606-53824bb73a04}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\encoder\video">
+      <UniqueIdentifier>{c9701cf2-d3f0-4418-9ca5-b5fe4a9001d8}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Header Files\encoder\audio">
+      <UniqueIdentifier>{b0ab0abf-99fb-4f8a-9710-f41a58665216}</UniqueIdentifier>
+    </Filter>
+    <Filter Include="Source Files\logger">
+      <UniqueIdentifier>{48ac2435-d669-43a7-839f-22bfeedc224d}</UniqueIdentifier>
+    </Filter>
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="export.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="muxer_define.h">
+      <Filter>Header Files\muxer</Filter>
+    </ClInclude>
+    <ClInclude Include="muxer_file.h">
+      <Filter>Header Files\muxer</Filter>
+    </ClInclude>
+    <ClInclude Include="record_audio.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="record_audio_define.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="record_audio_dshow.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="record_audio_factory.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="record_audio_wasapi.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="record_desktop.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="record_desktop_define.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="record_desktop_factory.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="record_desktop_ffmpeg_dshow.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="record_desktop_ffmpeg_gdi.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="device_audios.h">
+      <Filter>Header Files\devices</Filter>
+    </ClInclude>
+    <ClInclude Include="device_videos.h">
+      <Filter>Header Files\devices</Filter>
+    </ClInclude>
+    <ClInclude Include="headers_ffmpeg.h">
+      <Filter>Header Files\helpers</Filter>
+    </ClInclude>
+    <ClInclude Include="log_helper.h">
+      <Filter>Header Files\logger</Filter>
+    </ClInclude>
+    <ClInclude Include="utils_string.h">
+      <Filter>Header Files\helpers</Filter>
+    </ClInclude>
+    <ClInclude Include="headers_mmdevice.h">
+      <Filter>Header Files\helpers</Filter>
+    </ClInclude>
+    <ClInclude Include="error_define.h">
+      <Filter>Header Files\defines</Filter>
+    </ClInclude>
+    <ClInclude Include="ring_buffer.h">
+      <Filter>Header Files\helpers</Filter>
+    </ClInclude>
+    <ClInclude Include="resample_pcm.h">
+      <Filter>Header Files\helpers</Filter>
+    </ClInclude>
+    <ClInclude Include="sws_helper.h">
+      <Filter>Header Files\helpers</Filter>
+    </ClInclude>
+    <ClInclude Include="filter_aresample.h">
+      <Filter>Header Files\filters</Filter>
+    </ClInclude>
+    <ClInclude Include="filter_amix.h">
+      <Filter>Header Files\filters</Filter>
+    </ClInclude>
+    <ClInclude Include="filter.h">
+      <Filter>Header Files\filters</Filter>
+    </ClInclude>
+    <ClInclude Include="record_desktop_duplication.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="record_desktop_gdi.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="system_lib.h">
+      <Filter>Header Files\helpers\win</Filter>
+    </ClInclude>
+    <ClInclude Include="system_version.h">
+      <Filter>Header Files\helpers\win</Filter>
+    </ClInclude>
+    <ClInclude Include="hardware_acceleration.h">
+      <Filter>Header Files\helpers</Filter>
+    </ClInclude>
+    <ClInclude Include="d3d_helper.h">
+      <Filter>Header Files\helpers\win</Filter>
+    </ClInclude>
+    <ClInclude Include="encoder_video_define.h">
+      <Filter>Header Files\encoder\video</Filter>
+    </ClInclude>
+    <ClInclude Include="encoder_video.h">
+      <Filter>Header Files\encoder\video</Filter>
+    </ClInclude>
+    <ClInclude Include="encoder_aac.h">
+      <Filter>Header Files\encoder\audio</Filter>
+    </ClInclude>
+    <ClInclude Include="encoder_video_factory.h">
+      <Filter>Header Files\encoder\video</Filter>
+    </ClInclude>
+    <ClInclude Include="encoder_video_x264.h">
+      <Filter>Header Files\encoder\video</Filter>
+    </ClInclude>
+    <ClInclude Include="encoder_video_nvenc.h">
+      <Filter>Header Files\encoder\video</Filter>
+    </ClInclude>
+    <ClInclude Include="muxer_ffmpeg.h">
+      <Filter>Header Files\muxer</Filter>
+    </ClInclude>
+    <ClInclude Include="remuxer_ffmpeg.h">
+      <Filter>Header Files\muxer</Filter>
+    </ClInclude>
+    <ClInclude Include="system_time.h">
+      <Filter>Header Files\helpers\win</Filter>
+    </ClInclude>
+    <ClInclude Include="mul_db.h">
+      <Filter>Header Files\helpers</Filter>
+    </ClInclude>
+    <ClInclude Include="system_error.h">
+      <Filter>Header Files\helpers\win</Filter>
+    </ClInclude>
+    <ClInclude Include="record_desktop_wgc.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+    <ClInclude Include="record_desktop_mag.h">
+      <Filter>Header Files\record</Filter>
+    </ClInclude>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="main.cpp">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="export.cpp">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="muxer_file.cpp">
+      <Filter>Source Files\muxer</Filter>
+    </ClCompile>
+    <ClCompile Include="record_audio.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+    <ClCompile Include="record_audio_dshow.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+    <ClCompile Include="record_audio_factory.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+    <ClCompile Include="record_audio_wasapi.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+    <ClCompile Include="record_desktop.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+    <ClCompile Include="record_desktop_factory.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+    <ClCompile Include="record_desktop_ffmpeg_dshow.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+    <ClCompile Include="record_desktop_ffmpeg_gdi.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+    <ClCompile Include="device_audios.cpp">
+      <Filter>Source Files\devices</Filter>
+    </ClCompile>
+    <ClCompile Include="device_videos.cpp">
+      <Filter>Source Files\devices</Filter>
+    </ClCompile>
+    <ClCompile Include="utils_string.cpp">
+      <Filter>Source Files\helpers</Filter>
+    </ClCompile>
+    <ClCompile Include="headers_mmdevice.cpp">
+      <Filter>Source Files\helpers</Filter>
+    </ClCompile>
+    <ClCompile Include="resample_pcm.cpp">
+      <Filter>Source Files\helpers</Filter>
+    </ClCompile>
+    <ClCompile Include="sws_helper.cpp">
+      <Filter>Source Files\helpers</Filter>
+    </ClCompile>
+    <ClCompile Include="filter_aresample.cpp">
+      <Filter>Source Files\filters</Filter>
+    </ClCompile>
+    <ClCompile Include="filter_amix.cpp">
+      <Filter>Source Files\filters</Filter>
+    </ClCompile>
+    <ClCompile Include="filter.cpp">
+      <Filter>Source Files\filters</Filter>
+    </ClCompile>
+    <ClCompile Include="record_desktop_duplication.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+    <ClCompile Include="record_desktop_gdi.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+    <ClCompile Include="system_lib.cpp">
+      <Filter>Source Files\helpers\win</Filter>
+    </ClCompile>
+    <ClCompile Include="system_version.cpp">
+      <Filter>Source Files\helpers\win</Filter>
+    </ClCompile>
+    <ClCompile Include="hardware_acceleration.cpp">
+      <Filter>Source Files\helpers</Filter>
+    </ClCompile>
+    <ClCompile Include="d3d_helper.cpp">
+      <Filter>Source Files\helpers\win</Filter>
+    </ClCompile>
+    <ClCompile Include="encoder_video.cpp">
+      <Filter>Source Files\encoder\video</Filter>
+    </ClCompile>
+    <ClCompile Include="encoder_aac.cpp">
+      <Filter>Source Files\encoder\audio</Filter>
+    </ClCompile>
+    <ClCompile Include="encoder_video_factory.cpp">
+      <Filter>Source Files\encoder\video</Filter>
+    </ClCompile>
+    <ClCompile Include="encoder_video_x264.cpp">
+      <Filter>Source Files\encoder\video</Filter>
+    </ClCompile>
+    <ClCompile Include="encoder_video_nvenc.cpp">
+      <Filter>Source Files\encoder\video</Filter>
+    </ClCompile>
+    <ClCompile Include="muxer_ffmpeg.cpp">
+      <Filter>Source Files\muxer</Filter>
+    </ClCompile>
+    <ClCompile Include="remuxer_ffmpeg.cpp">
+      <Filter>Source Files\muxer</Filter>
+    </ClCompile>
+    <ClCompile Include="dllmain.cpp">
+      <Filter>Source Files</Filter>
+    </ClCompile>
+    <ClCompile Include="system_time.cpp">
+      <Filter>Source Files\helpers\win</Filter>
+    </ClCompile>
+    <ClCompile Include="log_helper.cpp">
+      <Filter>Source Files\logger</Filter>
+    </ClCompile>
+    <ClCompile Include="system_error.cpp">
+      <Filter>Source Files\helpers\win</Filter>
+    </ClCompile>
+    <ClCompile Include="record_desktop_wgc.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+    <ClCompile Include="record_desktop_mag.cpp">
+      <Filter>Source Files\record</Filter>
+    </ClCompile>
+  </ItemGroup>
+  <ItemGroup>
+    <FxCompile Include="d3d_pixelshader.hlsl">
+      <Filter>Source Files\helpers\win</Filter>
+    </FxCompile>
+    <FxCompile Include="d3d_vertexshader.hlsl">
+      <Filter>Source Files\helpers\win</Filter>
+    </FxCompile>
+  </ItemGroup>
+</Project>

+ 9 - 0
libs/Recorder/common.h

@@ -0,0 +1,9 @@
+#pragma once
+
+// Aggregated FFmpeg headers used across the recorder.
+// Forward slashes are used as path separators: MSVC accepts them and they
+// stay portable (the original backslashes break on non-Windows toolchains).
+// FFmpeg is a C library, so when this header is consumed from C++ the
+// declarations must be wrapped in extern "C" to get unmangled linkage.
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <libavformat/avformat.h>
+#include <libavutil/avutil.h>
+#include <libavdevice/avdevice.h>
+#include <libavcodec/avcodec.h>
+#include <libswscale/swscale.h>
+#include <libswresample/swresample.h>
+#include <libavutil/audio_fifo.h>
+
+#ifdef __cplusplus
+}
+#endif

+ 57 - 0
libs/Recorder/d3d_helper.cpp

@@ -0,0 +1,57 @@
+#include "d3d_helper.h"
+
+#include "system_lib.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+namespace am {
+	// Signature of CreateDXGIFactory1, resolved at runtime from dxgi.dll so
+	// the binary does not hard-link against DXGI.
+	typedef HRESULT(WINAPI *DXGI_FUNC_CREATEFACTORY)(REFIID, IDXGIFactory1 **);
+
+	// Enumerate every DXGI adapter on the system.
+	// *error receives AE_NO on success or an AE_* code on failure; on failure
+	// the returned list is empty.
+	// NOTE(review): the IDXGIAdapter pointers are returned without Release --
+	// ownership passes to the caller.  Unloading dxgi.dll (free_lib == true)
+	// while the caller still holds those interfaces looks unsafe -- confirm
+	// callers pass free_lib = false when they keep the adapters.
+	std::list<IDXGIAdapter*> d3d_helper::get_adapters(int * error, bool free_lib)
+	{
+		std::list<IDXGIAdapter*> adapters;
+
+		*error = AE_NO;
+
+		HMODULE hdxgi = load_system_library("dxgi.dll");
+
+		if (!hdxgi) {
+			*error = AE_D3D_LOAD_FAILED;
+			return adapters;
+		}
+
+		do {
+			DXGI_FUNC_CREATEFACTORY create_factory = nullptr;
+			create_factory = (DXGI_FUNC_CREATEFACTORY)GetProcAddress(hdxgi, "CreateDXGIFactory1");
+
+			if (create_factory == nullptr) {
+				*error = AE_DXGI_GET_PROC_FAILED;
+				break;
+			}
+
+			IDXGIFactory1 * dxgi_factory = nullptr;
+			HRESULT hr = create_factory(__uuidof(IDXGIFactory1), &dxgi_factory);
+			if (FAILED(hr)) {
+				*error = AE_DXGI_GET_FACTORY_FAILED;
+				break;
+			}
+
+			// EnumAdapters returns DXGI_ERROR_NOT_FOUND once the index runs
+			// past the last adapter; each returned adapter is AddRef'd for us.
+			unsigned int i = 0;
+			IDXGIAdapter *adapter = nullptr;
+			while (dxgi_factory->EnumAdapters(i, &adapter) != DXGI_ERROR_NOT_FOUND)
+			{
+				if(adapter)
+					adapters.push_back(adapter);
+				++i;
+			}
+
+			dxgi_factory->Release();
+
+		} while (0);
+
+		if (free_lib && hdxgi) free_system_library(hdxgi);
+
+		return adapters;
+	}
+}

+ 18 - 0
libs/Recorder/d3d_helper.h

@@ -0,0 +1,18 @@
+#ifndef D3D_HELPER
+#define D3D_HELPER
+
+#include <dxgi1_2.h>
+
+#include <list>
+
+namespace am {
+	// Static-only helper for DXGI adapter enumeration (see d3d_helper.cpp).
+	class d3d_helper
+	{
+	private:
+		// Not instantiable: all members are static.
+		d3d_helper() {}
+
+	public:
+		// Returns AddRef'd adapters (caller releases); *error gets an AE_* code.
+		static std::list<IDXGIAdapter*> get_adapters(int *error, bool free_lib = false);
+	};
+}
+#endif

+ 24 - 0
libs/Recorder/d3d_pixelshader.hlsl

@@ -0,0 +1,24 @@
+// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
+// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
+// PARTICULAR PURPOSE.
+//
+// Copyright (c) Microsoft Corporation. All rights reserved
+//----------------------------------------------------------------------
+
+// Desktop frame texture and its sampler, bound by the duplication renderer.
+Texture2D tx : register( t0 );
+SamplerState samLinear : register( s0 );
+
+struct PS_INPUT
+{
+    float4 Pos : SV_POSITION;
+    float2 Tex : TEXCOORD;
+};
+
+//--------------------------------------------------------------------------------------
+// Pixel Shader: plain textured pass-through -- sample the bound texture at
+// the interpolated texcoord.
+//--------------------------------------------------------------------------------------
+float4 PS(PS_INPUT input) : SV_Target
+{
+    return tx.Sample( samLinear, input.Tex );
+}

+ 28 - 0
libs/Recorder/d3d_vertexshader.hlsl

@@ -0,0 +1,28 @@
+// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
+// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
+// PARTICULAR PURPOSE.
+//
+// Copyright (c) Microsoft Corporation. All rights reserved
+//----------------------------------------------------------------------
+
+struct VS_INPUT
+{
+    float4 Pos : POSITION;
+    float2 Tex : TEXCOORD;
+};
+
+struct VS_OUTPUT
+{
+    float4 Pos : SV_POSITION;
+    float2 Tex : TEXCOORD;
+};
+
+
+//--------------------------------------------------------------------------------------
+// Vertex Shader: pure pass-through.  Vertices are expected to already be in
+// clip space; the identical field layout lets the struct convert implicitly.
+//--------------------------------------------------------------------------------------
+VS_OUTPUT VS(VS_INPUT input)
+{
+    return input;
+}

+ 251 - 0
libs/Recorder/device_audios.cpp

@@ -0,0 +1,251 @@
+#include "device_audios.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+#include "utils_string.h"
+
+#include "headers_mmdevice.h"
+
+#include <memory>
+
+namespace am {
+
+	// Thin wrappers over the private workers: `true` selects capture (input)
+	// endpoints, `false` selects render (output) endpoints.
+	int device_audios::get_input_devices(std::list<DEVICE_AUDIOS>& devices)
+	{
+		return get_devices(true, devices);
+	}
+
+	int device_audios::get_output_devices(std::list<DEVICE_AUDIOS>& devices)
+	{
+		return get_devices(false, devices);
+	}
+
+	int device_audios::get_default_input_device(std::string & id, std::string & name)
+	{
+		return get_default(true, id, name);
+	}
+
+	// Name keeps the historical "ouput" typo: it is part of the public API.
+	int device_audios::get_default_ouput_device(std::string & id, std::string & name)
+	{
+		return get_default(false, id, name);
+	}
+
+	// Enumerate all active capture (input == true) or render endpoints into
+	// `devices`.  On success a pseudo "default device" entry is pushed to the
+	// front so callers can always offer a follow-the-system-default option.
+	// Returns AE_NO on success or an AE_CO_* error code.
+	int device_audios::get_devices(bool input, std::list<DEVICE_AUDIOS>& devices)
+	{
+		com_initialize com_obj;
+
+		devices.clear();
+
+		// ComPtr locals release automatically on every exit path, and are
+		// destroyed before com_obj (declared first, destroyed last).
+		Microsoft::WRL::ComPtr<IMMDeviceEnumerator> device_enumerator;
+		Microsoft::WRL::ComPtr<IMMDevice> device;
+		Microsoft::WRL::ComPtr<IMMDeviceCollection> collection;
+		LPWSTR current_device_id = NULL;
+
+		int ret = AE_NO;
+
+		do {
+			HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
+				__uuidof(IMMDeviceEnumerator), (void**)device_enumerator.GetAddressOf());
+
+			if (FAILED(hr)) {
+				ret = AE_CO_CREATE_FAILED;
+				break;
+			}
+
+			hr = device_enumerator->GetDefaultAudioEndpoint(input ? eCapture : eRender, eConsole, device.GetAddressOf());
+			if (FAILED(hr)) {
+				ret = AE_CO_GETENDPOINT_FAILED;
+				break;
+			}
+
+			hr = device_enumerator->EnumAudioEndpoints(input ? eCapture : eRender, DEVICE_STATE_ACTIVE, collection.GetAddressOf());
+			if (FAILED(hr)) {
+				ret = AE_CO_ENUMENDPOINT_FAILED;
+				break;
+			}
+
+			UINT count;
+			hr = collection->GetCount(&count);
+			if (FAILED(hr)) {
+				ret = AE_CO_GET_ENDPOINT_COUNT_FAILED;
+				break;
+			}
+
+			hr = device->GetId(&current_device_id);
+			if (FAILED(hr)) {
+				ret = AE_CO_GET_ENDPOINT_ID_FAILED;
+				break;
+			}
+
+			std::string default_id = utils_string::unicode_utf8(current_device_id);
+
+			CoTaskMemFree(current_device_id);
+
+			// Unsigned counter matches GetCount(); ComPtr on the endpoint and
+			// property store fixes the per-iteration COM leaks of the original
+			// (raw pointers were never Released, including on `continue`).
+			for (UINT i = 0; i < count; ++i) {
+				Microsoft::WRL::ComPtr<IMMDevice> endpoint;
+				if (FAILED(collection->Item(i, endpoint.GetAddressOf())))
+					continue;
+
+				LPWSTR device_id = NULL;
+				if (FAILED(endpoint->GetId(&device_id)))
+					continue;
+
+				std::string str_id = utils_string::unicode_utf8(device_id);
+				CoTaskMemFree(device_id);
+
+				Microsoft::WRL::ComPtr<IPropertyStore> prop_store;
+				if (FAILED(endpoint->OpenPropertyStore(STGM_READ, prop_store.GetAddressOf())))
+					continue;
+
+				PROPVARIANT pv;
+				PropVariantInit(&pv);
+
+				if (FAILED(prop_store->GetValue(PKEY_Device_FriendlyName, &pv))) {
+					PropVariantClear(&pv);
+					continue;
+				}
+
+				std::string str_friendly;
+				if (pv.vt == VT_LPWSTR) {
+					str_friendly = utils_string::unicode_utf8(pv.pwszVal);
+				}
+				else if (pv.vt == VT_LPSTR) {
+					str_friendly = utils_string::ascii_utf8(pv.pszVal);
+				}
+
+				devices.push_back({
+					str_id,
+					str_friendly,
+					str_id.compare(default_id) == 0
+				});
+
+				PropVariantClear(&pv);
+			}
+		} while (0);
+
+		if (ret == AE_NO && devices.size()) {
+			devices.push_front({
+				utils_string::ascii_utf8(DEFAULT_AUDIO_INOUTPUT_ID),
+				utils_string::ascii_utf8(DEFAULT_AUDIO_INOUTPUT_NAME),
+				true
+			});
+		}
+
+
+		if (ret != AE_NO)
+			al_error("get_devices failed(%lu): %s", GetLastError(), err2str(ret));
+
+		return ret;
+	}
+
+	// Resolve the endpoint id and friendly name of the system default
+	// capture (input == true) or render endpoint.
+	// Returns AE_NO on success or an AE_CO_* error code.
+	int device_audios::get_default(bool input, std::string & id, std::string & name)
+	{
+		com_initialize com_obj;
+
+		// ComPtr locals release automatically on every exit path, before
+		// com_obj's destructor runs.
+		Microsoft::WRL::ComPtr<IMMDeviceEnumerator> device_enumerator;
+		Microsoft::WRL::ComPtr<IMMDevice> device;
+		LPWSTR current_device_id = NULL;
+
+		int ret = AE_NO;
+		do {
+
+			HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
+				__uuidof(IMMDeviceEnumerator), (void**)device_enumerator.GetAddressOf());
+			if (FAILED(hr)) {
+				ret = AE_CO_CREATE_FAILED;
+				break;
+			}
+
+			hr = device_enumerator->GetDefaultAudioEndpoint(input ? eCapture : eRender, eConsole, device.GetAddressOf());
+			if (FAILED(hr)) {
+				ret = AE_CO_GETENDPOINT_FAILED;
+				break;
+			}
+
+			// The original also ran EnumAudioEndpoints + GetCount here, but
+			// never used the collection; the dead round-trip (and its extra
+			// failure mode) is removed.
+
+			hr = device->GetId(&current_device_id);
+			if (FAILED(hr)) {
+				ret = AE_CO_GET_ENDPOINT_ID_FAILED;
+				break;
+			}
+
+			id = utils_string::unicode_utf8(current_device_id);
+
+			CoTaskMemFree(current_device_id);
+
+			// ComPtr fixes the original's leak of the raw IPropertyStore*.
+			Microsoft::WRL::ComPtr<IPropertyStore> prop_store;
+			PROPVARIANT pv;
+			PropVariantInit(&pv);
+
+			hr = device->OpenPropertyStore(STGM_READ, prop_store.GetAddressOf());
+			if (FAILED(hr)) {
+				ret = AE_CO_OPEN_PROPERTY_FAILED;
+				break;
+			}
+
+			hr = prop_store->GetValue(PKEY_Device_FriendlyName, &pv);
+			if (FAILED(hr)) {
+				ret = AE_CO_GET_VALUE_FAILED;
+				break;
+			}
+
+			if (pv.vt == VT_LPWSTR) {
+				name = utils_string::unicode_utf8(pv.pwszVal);
+			}
+			else if (pv.vt == VT_LPSTR) {
+				name = utils_string::ascii_utf8(pv.pszVal);
+			}
+
+			PropVariantClear(&pv);
+		} while (0);
+
+
+		if (ret != AE_NO)
+			al_debug("get_devices failed(%lu): %s", GetLastError(), err2str(ret));
+
+		return ret;
+	}
+
+}

+ 33 - 0
libs/Recorder/device_audios.h

@@ -0,0 +1,33 @@
+#ifndef  RECORD_DEVICES
+#define RECORD_DEVICES
+
+#include <list>
+#include <string>
+
+namespace am {
+	typedef struct {
+		std::string id;
+		std::string name;
+		uint8_t is_default;
+	}DEVICE_AUDIOS;
+
+	class device_audios
+	{
+	public:
+		static int get_default_input_device(std::string &id, std::string &name);
+
+		static int get_default_ouput_device(std::string &id, std::string &name);
+
+		static int get_input_devices(std::list<DEVICE_AUDIOS> &devices);
+
+		static int get_output_devices(std::list<DEVICE_AUDIOS> &devices);
+
+	private:
+		static int get_devices(bool input, std::list<DEVICE_AUDIOS> &devices);
+
+		static int get_default(bool input, std::string &id, std::string &name);
+	};
+
+}
+
+#endif // ! RECORD_DEVICES

+ 11 - 0
libs/Recorder/device_videos.cpp

@@ -0,0 +1,11 @@
+#include "device_videos.h"
+
+
+device_videos::device_videos()
+{
+}
+
+
+device_videos::~device_videos()
+{
+}

+ 8 - 0
libs/Recorder/device_videos.h

@@ -0,0 +1,8 @@
+#pragma once
+class device_videos
+{
+public:
+	device_videos();
+	~device_videos();
+};
+

+ 84 - 0
libs/Recorder/dllmain.cpp

@@ -0,0 +1,84 @@
+#include <Windows.h>
+
+#include <dbghelp.h>
+#include <stdio.h>
+#include <tchar.h>
+
+// Write a full-memory minidump for the current process to `path`.
+// Called from the unhandled-exception filter, so the logic stays minimal.
+static void dump_file(const TCHAR *path, EXCEPTION_POINTERS *exception)
+{
+	HANDLE file = CreateFile(path, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+
+	// CreateFile signals failure with INVALID_HANDLE_VALUE (not NULL); the
+	// original passed it straight to MiniDumpWriteDump/CloseHandle.
+	if (file == INVALID_HANDLE_VALUE)
+		return;
+
+	MINIDUMP_EXCEPTION_INFORMATION dump;
+	dump.ExceptionPointers = exception;
+	dump.ThreadId = GetCurrentThreadId();
+	dump.ClientPointers = TRUE;
+
+	MiniDumpWriteDump(GetCurrentProcess(), GetCurrentProcessId(), file, MiniDumpWithFullMemory, &dump, NULL, NULL);
+
+	CloseHandle(file);
+}
+
+#if _MSC_VER >= 1300    // for VC 7.0
+// from ATL 7.0 sources
+// __ImageBase is a linker-provided symbol located at this module's load
+// address; taking its address yields the HMODULE without any API call.
+#ifndef _delayimp_h
+extern "C" IMAGE_DOS_HEADER __ImageBase;
+#endif
+#endif
+
+// Return the HMODULE of the module containing this code (the DLL itself,
+// not the host EXE) -- needed because GetModuleHandle(NULL) returns the EXE.
+static HMODULE get_current_module()
+{
+#if _MSC_VER < 1300    // earlier than .NET compiler (VC 6.0)
+
+	// Here's a trick that will get you the handle of the module
+	// you're running in without any a-priori knowledge:
+	// http://www.dotnet247.com/247reference/msgs/13/65259.aspx
+
+	MEMORY_BASIC_INFORMATION mbi;
+	static int dummy;
+	VirtualQuery(&dummy, &mbi, sizeof(mbi));
+
+	return reinterpret_cast<HMODULE>(mbi.AllocationBase);
+
+#else    // VC 7.0
+
+	// from ATL 7.0 sources
+
+	return reinterpret_cast<HMODULE>(&__ImageBase);
+#endif
+}
+
+// Top-level unhandled-exception filter: writes %TEMP%\recorder.dmp and lets
+// the process terminate (EXCEPTION_EXECUTE_HANDLER).
+// NOTE(review): uses char buffers with GetTempPath/dump_file's TCHAR
+// parameter, so this only compiles in a multi-byte (non-UNICODE) build --
+// confirm the project character set.
+static long exception_handler(EXCEPTION_POINTERS *ep)
+{
+	char dmp_path[MAX_PATH] = { 0 };
+	char temp_path[MAX_PATH] = { 0 };
+
+	//c://users//appdata//local//temp//recorder.dmp
+	if (GetTempPath(MAX_PATH, temp_path)) {
+		sprintf_s(dmp_path, MAX_PATH, "%srecorder.dmp", temp_path);
+		printf("%s\r\n", dmp_path);
+
+		// Only dump when a path was actually built; the original called
+		// dump_file("") when GetTempPath failed.
+		dump_file(dmp_path, ep);
+	}
+
+	return EXCEPTION_EXECUTE_HANDLER;
+}
+
+// DLL entry point: installs the crash-dump filter on process attach.
+// NOTE(review): returns `bool` where the documented signature is BOOL, and
+// exception_handler is cast to LPTOP_LEVEL_EXCEPTION_FILTER (documented
+// filter returns LONG WINAPI) -- works in practice on MSVC, but confirm.
+bool APIENTRY DllMain(HMODULE hModule,
+	DWORD  ul_reason_for_call,
+	LPVOID lpReserved
+)
+{
+	switch (ul_reason_for_call)
+	{
+	case DLL_PROCESS_ATTACH:
+		SetUnhandledExceptionFilter((LPTOP_LEVEL_EXCEPTION_FILTER)exception_handler);
+		break;
+	case DLL_THREAD_ATTACH:
+		break;
+	case DLL_THREAD_DETACH:
+	case DLL_PROCESS_DETACH:
+		break;
+	}
+	return TRUE;
+}

+ 332 - 0
libs/Recorder/encoder_aac.cpp

@@ -0,0 +1,332 @@
+#include "encoder_aac.h"
+
+#include "ring_buffer.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+namespace am {
+
+	// Construct with all state zeroed; real setup happens in init().
+	// av_register_all() is required by older FFmpeg (deprecated/no-op in 4.x+).
+	encoder_aac::encoder_aac()
+	{
+		av_register_all();
+
+		// 10 MB staging buffer between the capture thread and encode_loop().
+		_ring_buffer = new ring_buffer<AVFrame>(1024 * 1024 * 10);
+
+		_inited = false;
+		_running = false;
+
+		_encoder = NULL;
+		_encoder_ctx = NULL;
+		_frame = NULL;
+		_buff = NULL;
+		_buff_size = 0;
+
+		_cond_notify = false;
+
+#ifdef SAVE_AAC
+		_aac_io_ctx = NULL;
+		_aac_stream = NULL;
+		_aac_fmt_ctx = NULL;
+#endif
+	}
+
+	// Stop the worker thread first so nothing touches the codec state that
+	// cleanup() tears down; the ring buffer is freed last.
+	encoder_aac::~encoder_aac()
+	{
+		stop();
+
+		cleanup();
+
+		delete _ring_buffer;
+	}
+
+	// Open the native FFmpeg AAC encoder and prepare the reusable input
+	// frame backed by _buff.  Idempotent: returns AE_NO when already inited.
+	// Returns AE_NO or an AE_FFMPEG_* error (state is cleaned up on failure).
+	int encoder_aac::init(int nb_channels, int sample_rate, AVSampleFormat fmt,int bit_rate)
+	{
+		int err = AE_NO;
+		int ret = 0;
+
+		if (_inited == true)
+			return err;
+
+		do {
+			_encoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
+			if (!_encoder) {
+				err = AE_FFMPEG_FIND_ENCODER_FAILED;
+				break;
+			}
+
+			_encoder_ctx = avcodec_alloc_context3(_encoder);
+			if (!_encoder_ctx) {
+				err = AE_FFMPEG_ALLOC_CONTEXT_FAILED;
+				break;
+			}
+
+			_encoder_ctx->channels = nb_channels;
+			_encoder_ctx->channel_layout = av_get_default_channel_layout(nb_channels);
+			_encoder_ctx->sample_rate = sample_rate;
+			_encoder_ctx->sample_fmt = fmt;
+			_encoder_ctx->bit_rate = bit_rate;
+
+			// The native AAC encoder historically required the experimental
+			// compliance level; time base is 1/sample_rate for audio pts.
+			_encoder_ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+			_encoder_ctx->time_base.den = sample_rate;
+			_encoder_ctx->time_base.num = 1;
+			// GLOBAL_HEADER: extradata (AudioSpecificConfig) goes to the muxer
+			// instead of being repeated in-stream.
+			_encoder_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
+			ret = avcodec_open2(_encoder_ctx, _encoder, NULL);
+			if (ret < 0) {
+				err = AE_FFMPEG_OPEN_CODEC_FAILED;
+				break;
+			}
+
+			// One full input frame's worth of interleaved/planar samples.
+			// NOTE(review): av_malloc result and avcodec_fill_audio_frame's
+			// return value are not checked -- confirm acceptable.
+			_buff_size = av_samples_get_buffer_size(NULL, nb_channels, _encoder_ctx->frame_size, fmt, 1);
+			_buff = (uint8_t*)av_malloc(_buff_size);
+
+			_frame = av_frame_alloc();
+			if (!_frame) {
+				err = AE_FFMPEG_ALLOC_FRAME_FAILED;
+				break;
+			}
+
+			_frame->channels = nb_channels;
+			_frame->nb_samples = _encoder_ctx->frame_size;
+			_frame->channel_layout = av_get_default_channel_layout(nb_channels);
+			_frame->format = fmt;
+			_frame->sample_rate = sample_rate;
+
+			// Point _frame's data planes into _buff (frame does not own it).
+			ret = avcodec_fill_audio_frame(_frame, nb_channels, fmt, _buff, _buff_size, 0);
+
+#ifdef SAVE_AAC
+			ret = avio_open(&_aac_io_ctx, "save.aac", AVIO_FLAG_READ_WRITE);
+			if (ret < 0) {
+				err = AE_FFMPEG_OPEN_IO_FAILED;
+				break;
+			}
+
+			_aac_fmt_ctx = avformat_alloc_context();
+			if (!_aac_fmt_ctx) {
+				err = AE_FFMPEG_ALLOC_CONTEXT_FAILED;
+				break;
+			}
+
+			_aac_fmt_ctx->pb = _aac_io_ctx;
+			_aac_fmt_ctx->oformat = av_guess_format(NULL, "save.aac", NULL);
+			_aac_fmt_ctx->url = av_strdup("save.aac");
+
+			_aac_stream = avformat_new_stream(_aac_fmt_ctx, NULL);
+			if (!_aac_stream) {
+				err = AE_FFMPEG_CREATE_STREAM_FAILED;
+				break;
+			}
+
+			ret = avcodec_parameters_from_context(_aac_stream->codecpar, _encoder_ctx);
+			if (ret < 0) {
+				err = AE_FFMPEG_COPY_PARAMS_FAILED;
+				break;
+			}
+
+			if (_aac_fmt_ctx->oformat->flags | AV_CODEC_FLAG_GLOBAL_HEADER) {
+				_aac_stream->codec->extradata_size = _encoder_ctx->extradata_size;// +AV_INPUT_BUFFER_PADDING_SIZE;
+				_aac_stream->codec->extradata = (uint8_t*)av_memdup(_encoder_ctx->extradata, _encoder_ctx->extradata_size);
+			}
+#endif
+
+			_inited = true;
+
+		} while (0);
+
+		if (err != AE_NO) {
+			al_debug("%s,error:%d", err2str(err), ret);
+			cleanup();
+		}
+
+		return err;
+	}
+
+	// Size in bytes of the codec extradata (AudioSpecificConfig); 0 before
+	// init() succeeds (the original dereferenced a NULL context).
+	int encoder_aac::get_extradata_size()
+	{
+		return _encoder_ctx ? _encoder_ctx->extradata_size : 0;
+	}
+
+	// Codec extradata bytes for the muxer; NULL before init() succeeds.
+	const uint8_t * encoder_aac::get_extradata()
+	{
+		return _encoder_ctx ? (const uint8_t*)_encoder_ctx->extradata : NULL;
+	}
+
+	// Samples per channel the encoder expects per frame; 0 before init().
+	int encoder_aac::get_nb_samples()
+	{
+		return _encoder_ctx ? _encoder_ctx->frame_size : 0;
+	}
+
+	// Launch the background encode thread.  Idempotent: calling start() on a
+	// running encoder is a no-op; init() must have succeeded first.
+	int encoder_aac::start()
+	{
+		if (_running)
+			return AE_NO;
+
+		if (!_inited)
+			return AE_NEED_INIT;
+
+		_running = true;
+		_thread = std::thread([this]() { encode_loop(); });
+
+		return AE_NO;
+	}
+
+	// Ask the worker to exit, wake it if it is waiting on the condvar, then
+	// join.  Safe to call when never started (_thread not joinable).
+	void encoder_aac::stop()
+	{
+		_running = false;
+
+		_cond_notify = true;
+		_cond_var.notify_all();
+
+		if (_thread.joinable())
+			_thread.join();
+	}
+
+	// Queue one PCM block for encoding; `frame` carries the timing metadata
+	// (pts/pkt_pts/pkt_dts) that encode_loop() copies onto the reusable frame.
+	// NOTE(review): memcpy of AVFrame shallow-copies its internal pointers;
+	// only the plain timestamp fields are read back, but confirm ring_buffer
+	// treats the struct as opaque POD and never dereferences them.
+	int encoder_aac::put(const uint8_t * data, int data_len, AVFrame *frame)
+	{
+		std::unique_lock<std::mutex> lock(_mutex);
+		
+		AVFrame frame_cp;
+		memcpy(&frame_cp, frame, sizeof(AVFrame));
+
+		_ring_buffer->put(data, data_len,frame_cp);
+		
+		_cond_notify = true;
+		_cond_var.notify_all();
+
+		return 0;
+	}
+
+	// Encoder time base (1/sample_rate after init()).
+	// NOTE(review): unlike get_codec_id(), this dereferences _encoder_ctx
+	// without an _inited guard -- callers must not use it before init().
+	const AVRational & encoder_aac::get_time_base()
+	{
+		return _encoder_ctx->time_base;
+	}
+
+	// AV_CODEC_ID_NONE until init() succeeds; the encoder's id (AAC) after.
+	AVCodecID encoder_aac::get_codec_id()
+	{
+		return _inited ? _encoder->id : AV_CODEC_ID_NONE;
+	}
+
+	// Push one frame into the encoder and drain all packets it produces,
+	// forwarding each to _on_data.  Passing frame == NULL flushes the codec.
+	// Returns AE_NO, or AE_FFMPEG_* on send/receive failure.
+	int encoder_aac::encode(AVFrame * frame, AVPacket * packet)
+	{
+		int ret = avcodec_send_frame(_encoder_ctx, frame);
+		if (ret < 0) {
+			return AE_FFMPEG_ENCODE_FRAME_FAILED;
+		}
+
+
+		// One send may yield zero or more packets; EAGAIN means the encoder
+		// wants more input, EOF means the flush is complete.
+		while (ret == 0) {
+			av_init_packet(packet);
+
+			ret = avcodec_receive_packet(_encoder_ctx, packet);
+			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+				break;
+			}
+
+			if (ret < 0) {
+				return AE_FFMPEG_READ_PACKET_FAILED;
+			}
+
+			
+			if (ret == 0 && _on_data)
+				_on_data(packet);
+			
+
+#ifdef SAVE_AAC
+			av_packet_rescale_ts(packet, _encoder_ctx->time_base, _aac_stream->time_base);
+			packet->stream_index = _aac_stream->index;
+			av_write_frame(_aac_fmt_ctx, packet);
+#endif
+
+			av_packet_unref(packet);
+		}
+
+		return AE_NO;
+	}
+
+	// Worker thread body: wait for put() to signal data, drain the ring
+	// buffer into _buff/_frame, and encode each block.  On exit the codec is
+	// flushed (encode(NULL, ...)) so buffered packets are delivered.
+	void encoder_aac::encode_loop()
+	{
+		int len = 0;
+		int error = AE_NO;
+
+		AVPacket *packet = av_packet_alloc();
+		AVFrame pcm_frame;
+
+#ifdef SAVE_AAC
+		avformat_write_header(_aac_fmt_ctx, NULL);
+#endif
+
+		while (_running)
+		{
+			std::unique_lock<std::mutex> lock(_mutex);
+			// Timed wait so a missed notify cannot hang shutdown for long.
+			while (!_cond_notify && _running)
+				_cond_var.wait_for(lock, std::chrono::milliseconds(300));
+
+			while ((len = _ring_buffer->get(_buff, _buff_size, pcm_frame))) {
+				// Carry the producer's timestamps onto the reusable frame.
+				_frame->pts = pcm_frame.pts;
+				_frame->pkt_pts = pcm_frame.pkt_pts;
+				_frame->pkt_dts = pcm_frame.pkt_dts;
+
+				if ((error = encode(_frame, packet)) != AE_NO) {
+					if (_on_error) 
+						_on_error(error);
+
+					al_fatal("read aac packet failed:%d", error);
+
+					break;
+				}
+
+			}
+
+			_cond_notify = false;
+
+		}
+
+		//flush pcm data in encoder
+		encode(NULL, packet);
+
+		av_packet_free(&packet);
+#ifdef SAVE_AAC
+		av_write_trailer(_aac_fmt_ctx);
+#endif
+
+	}
+
+	// Release everything init() allocated.  Safe to call repeatedly and on a
+	// partially-initialized encoder (every pointer is guarded and NULLed).
+	void encoder_aac::cleanup()
+	{
+		// Guard on the context itself (the original guarded on _encoder and
+		// could pass a NULL context to avcodec_close).
+		if (_encoder_ctx) {
+			avcodec_close(_encoder_ctx);
+			avcodec_free_context(&_encoder_ctx);  // also NULLs _encoder_ctx
+		}
+
+		_encoder = NULL;
+
+		// _frame came from av_frame_alloc(), so it must be released with
+		// av_frame_free() -- the original's av_free() leaked the frame's
+		// internals.  Its data planes point into _buff (filled via
+		// avcodec_fill_audio_frame), so this does not double-free _buff.
+		if (_frame)
+			av_frame_free(&_frame);
+
+		if (_buff)
+			av_free(_buff);
+
+		_buff = NULL;
+
+#ifdef SAVE_AAC
+		if (_aac_fmt_ctx) {
+			avio_closep(&_aac_fmt_ctx->pb);
+			avformat_free_context(_aac_fmt_ctx);
+		}
+#endif
+
+	}
+}

+ 92 - 0
libs/Recorder/encoder_aac.h

@@ -0,0 +1,92 @@
+#ifndef ENCODER_AAC
+#define ENCODER_AAC
+
+#include <atomic>
+#include <thread>
+#include <functional>
+#include <mutex>
+#include <condition_variable>
+
+#include "headers_ffmpeg.h"
+#include "ring_buffer.h"
+
+//#define SAVE_AAC
+
+namespace am {
	// Callback invoked with each encoded AAC packet; the packet is only
	// valid for the duration of the call (the encoder unrefs it after).
	typedef std::function<void(AVPacket *packet)> cb_aac_data;
	// Callback invoked with an AM_ERROR code when encoding fails.
	typedef std::function<void(int)> cb_aac_error;


	// AAC audio encoder. Raw PCM is queued via put(); a worker thread
	// started by start() encodes it and delivers packets via callbacks.
	class encoder_aac {
	public:
		encoder_aac();
		~encoder_aac();

		// Configure the codec; must succeed before start().
		int init(
			int nb_channels,
			int sample_rate,
			AVSampleFormat fmt,
			int bit_rate
		);

		// Codec extradata (AudioSpecificConfig) for muxers; valid after init().
		int get_extradata_size();
		const uint8_t* get_extradata();

		// Samples per channel the encoder consumes per frame.
		int get_nb_samples();

		int start();

		void stop();
		
		// Queue PCM data; 'frame' supplies the timing fields (pts etc.).
		int put(const uint8_t *data,int data_len,AVFrame *frame);

		// NOTE(review): name keeps its original (misspelled) form so
		// existing callers stay compatible.
		inline void registe_cb(
			cb_aac_data on_data,
			cb_aac_error on_error) {
			_on_data = on_data;
			_on_error = on_error;
		}

		const AVRational &get_time_base();

		AVCodecID get_codec_id();

	private:
		// send one frame (NULL flushes) and drain all resulting packets
		int encode(AVFrame *frame, AVPacket *packet);

		void encode_loop();

		void cleanup();

	private:
		cb_aac_data _on_data;
		cb_aac_error _on_error;

		// queue carrying PCM from put() to the encode thread
		ring_buffer<AVFrame> *_ring_buffer;

		std::atomic_bool _inited;
		std::atomic_bool _running;

		std::thread _thread;

		AVCodec *_encoder;
		AVCodecContext *_encoder_ctx;
		AVFrame *_frame;   // reusable frame whose planes point into _buff
		uint8_t *_buff;    // sample buffer filled from the ring buffer
		int _buff_size;

		// _cond_notify is guarded by _mutex and signals queued data
		std::mutex _mutex;
		std::condition_variable _cond_var;
		bool _cond_notify;

#ifdef SAVE_AAC
		AVIOContext *_aac_io_ctx;
		AVStream *_aac_stream;
		AVFormatContext *_aac_fmt_ctx;
#endif
	};
+}
+
+
+
+#endif

+ 70 - 0
libs/Recorder/encoder_video.cpp

@@ -0,0 +1,70 @@
+#include "encoder_video.h"
+
+namespace am {
+
+	encoder_video::encoder_video()
+	{
+		_inited = false;
+		_running = false;
+
+		_time_base = { 0,AV_TIME_BASE };
+
+		_encoder_id = EID_VIDEO_X264;
+		_encoder_type = ET_VIDEO_SOFT;
+
+		_cond_notify = false;
+
+		_ring_buffer = new ring_buffer<AVFrame>();
+	}
+
+
+	encoder_video::~encoder_video()
+	{
+		if(_ring_buffer)
+			delete _ring_buffer;
+	}
+
+	int encoder_video::start()
+	{
+		int error = AE_NO;
+
+		if (_running == true) {
+			return error;
+		}
+
+		if (_inited == false) {
+			return AE_NEED_INIT;
+		}
+
+		_running = true;
+		_thread = std::thread(std::bind(&encoder_video::encode_loop, this));
+
+		return error;
+	}
+
	// Stop the encode thread: clear the run flag, wake the thread (it may
	// be waiting on the condition variable), then join. encode_loop()
	// flushes the encoder on its way out.
	void encoder_video::stop()
	{
		_running = false;

		_cond_notify = true;
		_cond_var.notify_all();

		if (_thread.joinable())
			_thread.join();
	}
+
	// Queue one raw frame for encoding. Only the AVFrame header is copied
	// (shallow memcpy) to carry pts/dts; the pixel payload travels in
	// 'data' and is deep-copied by the ring buffer.
	// NOTE(review): assumes the caller does not rely on refcounted frame
	// buffers surviving — plane pointers are not deep-copied here.
	int encoder_video::put(const uint8_t * data, int data_len, AVFrame * frame)
	{
		std::unique_lock<std::mutex> lock(_mutex);

		AVFrame frame_cp;
		memcpy(&frame_cp, frame, sizeof(AVFrame));

		_ring_buffer->put(data, data_len, frame_cp);

		_cond_notify = true;
		_cond_var.notify_all();
		return 0;
	}
+
+}

+ 83 - 0
libs/Recorder/encoder_video.h

@@ -0,0 +1,83 @@
+#ifndef ENCODER_VIDEO
+#define ENCODER_VIDEO
+
+#include <atomic>
+#include <thread>
+#include <functional>
+#include <mutex>
+#include <condition_variable>
+
+#include "headers_ffmpeg.h"
+#include "ring_buffer.h"
+
+#include "encoder_video_define.h"
+
+namespace am {
+
	// Callback invoked with each encoded video packet; valid only for the
	// duration of the call (the encoder unrefs it afterwards).
	typedef std::function<void(AVPacket *packet)> cb_264_data;
	// Callback invoked with an AM_ERROR code when encoding fails.
	typedef std::function<void(int)> cb_264_error;

	// Abstract base for the video encoders (x264, nvenc, ...). Owns the
	// worker thread (start()/stop()) that drains frames queued by put()
	// through a ring buffer and emits packets via the callbacks.
	class encoder_video
	{
	public:
		encoder_video();
		virtual ~encoder_video();

		// Configure the codec; must succeed before start().
		// qb: quality 0~100; key_pic_sec: keyframe interval in seconds.
		virtual int init(
			int pic_width, 
			int pic_height,
			int frame_rate,
			int bit_rate, 
			int qb, 
			int key_pic_sec = 2) = 0;

		// Codec extradata (SPS/PPS) for muxers; valid after init().
		virtual int get_extradata_size() = 0;
		virtual const uint8_t* get_extradata() = 0;

		// NOTE(review): name keeps its original (misspelled) form so
		// existing callers stay compatible.
		inline void registe_cb(
			cb_264_data on_data,
			cb_264_error on_error) {
			_on_data = on_data;
			_on_error = on_error;
		}

		inline const AVRational &get_time_base() {
			return _time_base;
		};

		virtual int start();

		virtual void stop();

		// Queue one raw frame (shallow AVFrame copy + deep pixel copy).
		virtual int put(const uint8_t *data, int data_len, AVFrame *frame);

		virtual AVCodecID get_codec_id() = 0;

	protected:
		virtual void cleanup() = 0;
		virtual void encode_loop() = 0;

	protected:
		ENCODER_VIDEO_ID _encoder_id;
		ENCODER_VIDEO_TYPES _encoder_type;

		cb_264_data _on_data;
		cb_264_error _on_error;

		// frame queue between put() and the encode thread
		ring_buffer<AVFrame> *_ring_buffer;

		AVRational _time_base;

		std::atomic_bool _inited;
		std::atomic_bool _running;

		std::thread _thread;

		// _cond_notify is guarded by _mutex and signals queued data
		std::mutex _mutex;
		std::condition_variable _cond_var;
		bool _cond_notify;
	};
+
+}
+
+#endif // !ENCODER_VIDEO

+ 24 - 0
libs/Recorder/encoder_video_define.h

@@ -0,0 +1,24 @@
+#ifndef ENCODER_VIDEO_DEFINE
+#define ENCODER_VIDEO_DEFINE
+
+#include "hardware_acceleration.h"
+
+namespace am {
+
	// Identifies a concrete encoder implementation. Values alias the
	// hardware-acceleration probe results so a detected HARDWARE_TYPE_*
	// can be used directly as an encoder id (UNKNOWN -> software x264).
	typedef enum {
		EID_VIDEO_X264  = HARDWARE_TYPE_UNKNOWN,
		EID_VIDEO_NVENC = HARDWARE_TYPE_NVENC,
		EID_VIDEO_QSV = HARDWARE_TYPE_QSV,
		EID_VIDEO_AMF = HARDWARE_TYPE_AMF,
		EID_VIDEO_VAAPI = HARDWARE_TYPE_VAAPI,
	}ENCODER_VIDEO_ID;

	// Broad encoder category: CPU (x264) vs GPU-accelerated back ends.
	typedef enum {
		ET_VIDEO_SOFT,
		ET_VIDEO_HARDWARE
	}ENCODER_VIDEO_TYPES;
+
+}
+
+#endif // !ENCODER_VIDEO_DEFINE
+

+ 43 - 0
libs/Recorder/encoder_video_factory.cpp

@@ -0,0 +1,43 @@
+#include "encoder_video_factory.h"
+
+#include "encoder_video.h"
+#include "encoder_video_x264.h"
+#include "encoder_video_nvenc.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+namespace am {
+
+	int encoder_video_new(ENCODER_VIDEO_ID id, encoder_video ** encoder)
+	{
+		int err = AE_NO;
+
+		switch (id)
+		{
+		case EID_VIDEO_X264:
+			*encoder = (encoder_video*)new encoder_video_x264();
+			break;
+		case EID_VIDEO_NVENC:
+			*encoder = (encoder_video*)new encoder_video_nvenc();
+			break;
+		default:
+			err = AE_UNSUPPORT;
+			break;
+		}
+
+		return err;
+	}
+
+	void encoder_video_destroy(encoder_video ** encoder)
+	{
+		if (*encoder != nullptr) {
+			(*encoder)->stop();
+
+			delete *encoder;
+		}
+
+		*encoder = nullptr;
+	}
+
+}

+ 15 - 0
libs/Recorder/encoder_video_factory.h

@@ -0,0 +1,15 @@
+#ifndef ENCODER_VIDEO_FACTORY
+#define ENCODER_VIDEO_FACTORY
+
+#include "encoder_video_define.h"
+
namespace am {
	class encoder_video;

	// Allocate the encoder implementation matching 'id' into *encoder;
	// returns AE_UNSUPPORT for unknown ids.
	int encoder_video_new(ENCODER_VIDEO_ID id, encoder_video **encoder);

	// Stop, delete and null *encoder.
	void encoder_video_destroy(encoder_video **encoder);
}
+
+#endif // !ENCODER_VIDEO_FACTORY
+

+ 237 - 0
libs/Recorder/encoder_video_nvenc.cpp

@@ -0,0 +1,237 @@
+#include "encoder_video_nvenc.h"
+
+namespace am {
+
+	encoder_video_nvenc::encoder_video_nvenc()
+	{
+		av_register_all();
+
+		_encoder = NULL;
+		_encoder_ctx = NULL;
+		_frame = NULL;
+		_buff = NULL;
+		_buff_size = 0;
+		_y_size = 0;
+	}
+
+
	// Stops the encode thread first (flushing pending frames), then frees
	// every ffmpeg object owned by this encoder.
	encoder_video_nvenc::~encoder_video_nvenc()
	{
		stop();

		cleanup();
	}
+
+	int encoder_video_nvenc::init(int pic_width, int pic_height, int frame_rate, int bit_rate, int qb, int key_pic_sec)
+	{
+		if (_inited == true)
+			return AE_NO;
+
+		int err = AE_NO;
+		int ret = 0;
+
+		do {
+			_encoder = avcodec_find_encoder_by_name("h264_nvenc");
+			if(!_encoder) _encoder = avcodec_find_encoder_by_name("nvenc_h264");
+
+			if (!_encoder) {
+				err = AE_FFMPEG_FIND_ENCODER_FAILED;
+				break;
+			}
+
+			_encoder_ctx = avcodec_alloc_context3(_encoder);
+			if (!_encoder_ctx) {
+				err = AE_FFMPEG_ALLOC_CONTEXT_FAILED;
+				break;
+			}
+
+			// ffmpeg -h encoder=h264_nvenc show all encoder options
+
+			const char *rate_control = "cbr"; // cbr | cqp | vbr | lossless
+			const char *profile = "baseline"; // baseline | main | high |high444p
+			const char *preset = "default";      // default | slow | medium | fast | 
+											  // hp | hq | bd | 11 | 11hq | 11hp | lossless | losslesshp
+
+#if 0//USE_CBR
+			av_opt_set_int(_encoder_ctx->priv_data, "cbr", true, 0);
+			_encoder_ctx->bit_rate = bit_rate - bit_rate * (100 - qb) / 100;
+			_encoder_ctx->rc_buffer_size = _encoder_ctx->bit_rate;
+			_encoder_ctx->rc_max_rate = _encoder_ctx->bit_rate;
+			_encoder_ctx->rc_min_rate = _encoder_ctx->bit_rate;
+#else
+			_encoder_ctx->bit_rate = bit_rate;
+			
+			//qb is 0 ~ 100
+			qb = max(min(qb, 100), 0);
+
+			//for qmax more larger,quality is more less, max qmax is qmin + 30*(100 - 0)/100 = qmin + 30
+			_encoder_ctx->qmin = 30;
+			_encoder_ctx->qmax = _encoder_ctx->qmin + 15 * (100 - qb) / 100;
+			
+#endif
+
+			av_opt_set(_encoder_ctx->priv_data, "profile", profile, 0);
+			av_opt_set(_encoder_ctx->priv_data, "preset", preset, 0);
+
+			av_opt_set(_encoder_ctx->priv_data, "level", "auto", 0);
+			av_opt_set_int(_encoder_ctx->priv_data, "2pass", false, 0);
+			av_opt_set_int(_encoder_ctx->priv_data, "gpu", 0, 0);
+
+			
+			_encoder_ctx->width = pic_width;
+			_encoder_ctx->height = pic_height;
+			_encoder_ctx->time_base.num = 1;
+			_encoder_ctx->time_base.den = frame_rate;
+			_encoder_ctx->framerate = { frame_rate,1 };
+			_encoder_ctx->pix_fmt = AV_PIX_FMT_YUV420P;		
+			
+			if (key_pic_sec == 0)
+				_encoder_ctx->gop_size = 250;
+			else
+				_encoder_ctx->gop_size = key_pic_sec * _encoder_ctx->time_base.den / _encoder_ctx->time_base.num;
+
+			_encoder_ctx->max_b_frames = 0;//NO B Frame
+			_encoder_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
+			ret = avcodec_open2(_encoder_ctx, _encoder, nullptr);
+			if (ret != 0) {
+				err = AE_FFMPEG_OPEN_CODEC_FAILED;
+				break;
+			}
+
+			_frame = av_frame_alloc();
+
+			_buff_size = av_image_get_buffer_size(_encoder_ctx->pix_fmt, _encoder_ctx->width, _encoder_ctx->height, 1);
+
+			_buff = (uint8_t*)av_malloc(_buff_size);
+			if (!_buff) {
+				break;
+			}
+
+			av_image_fill_arrays(_frame->data, _frame->linesize, _buff, _encoder_ctx->pix_fmt, _encoder_ctx->width, _encoder_ctx->height, 1);
+
+			_frame->format = _encoder_ctx->pix_fmt;
+			_frame->width = _encoder_ctx->width;
+			_frame->height = _encoder_ctx->height;
+
+			_y_size = _encoder_ctx->width * _encoder_ctx->height;
+
+			_time_base = _encoder_ctx->time_base;
+
+			_inited = true;
+		} while (0);
+
+		if (err != AE_NO) {
+			al_debug("%s,error:%d %lu", err2str(err), ret, GetLastError());
+			cleanup();
+		}
+
+		return err;
+	}
+
	// Size in bytes of the codec extradata (SPS/PPS); valid after init().
	int encoder_video_nvenc::get_extradata_size()
	{
		return _encoder_ctx->extradata_size;
	}
+
	// Pointer to the codec extradata owned by the codec context; do not
	// free. Valid after init() and until cleanup().
	const uint8_t * encoder_video_nvenc::get_extradata()
	{
		return (const uint8_t*)_encoder_ctx->extradata;
	}
+
+	AVCodecID encoder_video_nvenc::get_codec_id()
+	{
+		if (_inited == false) return AV_CODEC_ID_NONE;
+
+		return _encoder->id;
+	}
+
+	void encoder_video_nvenc::cleanup()
+	{
+		if (_frame)
+			av_free(_frame);
+		_frame = NULL;
+
+		if (_buff)
+			av_free(_buff);
+
+		_buff = NULL;
+
+		if (_encoder)
+			avcodec_close(_encoder_ctx);
+
+		_encoder = NULL;
+
+		if (_encoder_ctx)
+			avcodec_free_context(&_encoder_ctx);
+
+		_encoder_ctx = NULL;
+	}
+
+	void encoder_video_nvenc::encode_loop()
+	{
+		AVPacket *packet = av_packet_alloc();
+		AVFrame yuv_frame;
+
+		int error = AE_NO;
+
+		while (_running)
+		{
+			std::unique_lock<std::mutex> lock(_mutex);
+			while (!_cond_notify && _running)
+				_cond_var.wait_for(lock, std::chrono::milliseconds(300));
+
+			while (_ring_buffer->get(_buff, _buff_size, yuv_frame)) {
+				_frame->pkt_dts = yuv_frame.pkt_dts;
+				_frame->pkt_dts = yuv_frame.pkt_dts;
+				_frame->pts = yuv_frame.pts;
+
+				if ((error = encode(_frame, packet)) != AE_NO) {
+					if (_on_error)
+						_on_error(error);
+
+					al_fatal("encode 264 packet failed:%d", error);
+
+					break;
+				}
+			}
+
+			_cond_notify = false;
+		}
+
+		//flush frame in encoder
+		encode(NULL, packet);
+
+		av_packet_free(&packet);
+	}
+
	// Push one frame (or NULL to flush) into the encoder and hand every
	// packet it produces to the data callback.
	int encoder_video_nvenc::encode(AVFrame * frame, AVPacket * packet)
	{
		int ret = avcodec_send_frame(_encoder_ctx, frame);
		if (ret < 0) {
			return AE_FFMPEG_ENCODE_FRAME_FAILED;
		}

		// one sent frame may yield zero or more packets
		while (ret >= 0) {

			av_init_packet(packet);

			ret = avcodec_receive_packet(_encoder_ctx, packet);
			// EAGAIN: encoder needs more input; EOF: fully flushed
			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
				break;
			}

			if (ret < 0) {
				return AE_FFMPEG_READ_PACKET_FAILED;
			}

			if (ret == 0 && _on_data)
				_on_data(packet);

			av_packet_unref(packet);
		}

		return AE_NO;
	}
+
+}

+ 46 - 0
libs/Recorder/encoder_video_nvenc.h

@@ -0,0 +1,46 @@
+#ifndef ENCODER_VIDEO_NVENC
+#define ENCODER_VIDEO_NVENC
+
+#include "encoder_video.h"
+
+namespace am {
+
	// NVIDIA NVENC hardware h264 encoder built on ffmpeg's h264_nvenc.
	// Threading and queueing behaviour come from encoder_video.
	class encoder_video_nvenc :
		public encoder_video
	{
	public:
		encoder_video_nvenc();
		~encoder_video_nvenc();

		// See encoder_video::init; opens the nvenc codec context.
		int init(int pic_width, 
			int pic_height, 
			int frame_rate, 
			int bit_rate, 
			int qb, 
			int key_pic_sec = 2
		);

		// SPS/PPS extradata; valid after init().
		int get_extradata_size();
		const uint8_t* get_extradata();

		AVCodecID get_codec_id();

	protected:
		void cleanup();
		void encode_loop();

	private:
		// send one frame (NULL flushes) and drain all resulting packets
		int encode(AVFrame *frame, AVPacket *packet);

	private:
		AVCodec *_encoder;
		AVCodecContext *_encoder_ctx;
		AVFrame *_frame;  // reusable frame whose planes point into _buff
		uint8_t *_buff;   // pixel buffer filled from the ring buffer
		int _buff_size;
		int _y_size;      // width * height (size of the Y plane)
	};
+
+}
+
+#endif // !ENCODER_VIDEO_NVENC

+ 222 - 0
libs/Recorder/encoder_video_x264.cpp

@@ -0,0 +1,222 @@
+#include "encoder_video_x264.h"
+
+#include "log_helper.h"
+#include "error_define.h"
+
+namespace am {
+
+	encoder_video_x264::encoder_video_x264()
+	{
+		av_register_all();
+
+		_encoder = NULL;
+		_encoder_ctx = NULL;
+		_frame = NULL;
+		_buff = NULL;
+		_buff_size = 0;
+		_y_size = 0;
+	}
+
	// Stops the encode thread first (flushing pending frames), then frees
	// every ffmpeg object owned by this encoder.
	encoder_video_x264::~encoder_video_x264()
	{
		stop();

		cleanup();
	}
+
+	int encoder_video_x264::init(int pic_width, int pic_height, int frame_rate, int bit_rate ,int qb, int key_pic_sec)
+	{
+		if (_inited == true)
+			return AE_NO;
+
+		int err = AE_NO;
+		int ret = 0;
+
+		AVDictionary *options = 0;
+
+		av_dict_set(&options, "preset", "superfast", 0);
+		av_dict_set(&options, "tune", "zerolatency", 0);
+
+		do {
+			_encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
+			if (!_encoder) {
+				err = AE_FFMPEG_FIND_ENCODER_FAILED;
+				break;
+			}
+
+			_encoder_ctx = avcodec_alloc_context3(_encoder);
+			if (!_encoder_ctx) {
+				err = AE_FFMPEG_ALLOC_CONTEXT_FAILED;
+				break;
+			}
+
+			_encoder_ctx->codec_id = AV_CODEC_ID_H264;
+			_encoder_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
+			_encoder_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
+			_encoder_ctx->width = pic_width;
+			_encoder_ctx->height = pic_height;
+			_encoder_ctx->time_base.num = 1;
+			_encoder_ctx->time_base.den = frame_rate;
+			_encoder_ctx->framerate = { frame_rate,1 };
+			_encoder_ctx->bit_rate = bit_rate;
+			
+			if (key_pic_sec == 0)
+				_encoder_ctx->gop_size = 250;
+			else
+				_encoder_ctx->gop_size = key_pic_sec * _encoder_ctx->time_base.den / _encoder_ctx->time_base.num;
+			
+			//qb is 0 ~ 100
+			qb = max(min(qb, 100), 0);
+
+			//for qmax more larger,quality is more less, max qmax is qmin + 30*(100 - 0)/100 = qmin + 30
+			_encoder_ctx->qmin = 30;
+			_encoder_ctx->qmax = _encoder_ctx->qmin + 15 * (100 - qb) / 100;
+
+			_encoder_ctx->max_b_frames = 0;//NO B Frame
+			_encoder_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
+			ret = avcodec_open2(_encoder_ctx, _encoder, &options);
+			if (ret != 0) {
+				err = AE_FFMPEG_OPEN_CODEC_FAILED;
+				break;
+			}
+
+			_frame = av_frame_alloc();
+
+			_buff_size = av_image_get_buffer_size(_encoder_ctx->pix_fmt, _encoder_ctx->width, _encoder_ctx->height, 1);
+			
+			_buff = (uint8_t*)av_malloc(_buff_size);
+			if (!_buff) {
+				break;
+			}
+
+			av_image_fill_arrays(_frame->data, _frame->linesize, _buff, _encoder_ctx->pix_fmt, _encoder_ctx->width, _encoder_ctx->height, 1);
+
+			_frame->format = _encoder_ctx->pix_fmt;
+			_frame->width = _encoder_ctx->width;
+			_frame->height = _encoder_ctx->height;
+
+			_y_size = _encoder_ctx->width * _encoder_ctx->height;
+
+			_time_base = _encoder_ctx->time_base;
+			
+			_inited = true;
+		} while (0);
+
+		if (err != AE_NO) {
+			al_debug("%s,error:%d %lu", err2str(err), ret, GetLastError());
+			cleanup();
+		}
+
+		if(options)
+			av_dict_free(&options);
+
+		return err;
+	}
+
	// Size in bytes of the codec extradata (SPS/PPS); valid after init().
	int encoder_video_x264::get_extradata_size()
	{
		return _encoder_ctx->extradata_size;
	}
+
	// Pointer to the codec extradata owned by the codec context; do not
	// free. Valid after init() and until cleanup().
	const uint8_t * encoder_video_x264::get_extradata()
	{
		return (const uint8_t*)_encoder_ctx->extradata;
	}
+
+	AVCodecID encoder_video_x264::get_codec_id()
+	{
+		if (_inited == false) return AV_CODEC_ID_NONE;
+
+		return _encoder->id;
+	}
+
+	void encoder_video_x264::cleanup()
+	{
+		if (_frame)
+			av_free(_frame);
+		_frame = NULL;
+
+		if (_buff)
+			av_free(_buff);
+
+		_buff = NULL;
+
+		if (_encoder)
+			avcodec_close(_encoder_ctx);
+
+		_encoder = NULL;
+
+		if (_encoder_ctx)
+			avcodec_free_context(&_encoder_ctx);
+
+		_encoder_ctx = NULL;
+	}
+
	// Push one frame (or NULL to flush) into the encoder and hand every
	// packet it produces to the data callback.
	int encoder_video_x264::encode(AVFrame * frame, AVPacket * packet)
	{
		int ret = avcodec_send_frame(_encoder_ctx, frame);
		if (ret < 0) {
			return AE_FFMPEG_ENCODE_FRAME_FAILED;
		}

		// one sent frame may yield zero or more packets
		while (ret >= 0) {
			
			av_init_packet(packet);

			ret = avcodec_receive_packet(_encoder_ctx, packet);
			// EAGAIN: encoder needs more input; EOF: fully flushed
			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
				break;
			}

			if (ret < 0) {
				return AE_FFMPEG_READ_PACKET_FAILED;
			}

			if (ret == 0 && _on_data)
				_on_data(packet);

			av_packet_unref(packet);
		}

		return AE_NO;
	}
+
+	void encoder_video_x264::encode_loop()
+	{
+		AVPacket *packet = av_packet_alloc();
+		AVFrame yuv_frame;
+
+		int error = AE_NO;
+
+		while (_running)
+		{
+			std::unique_lock<std::mutex> lock(_mutex);
+			while (!_cond_notify && _running)
+				_cond_var.wait_for(lock, std::chrono::milliseconds(300));
+
+			while (_ring_buffer->get(_buff, _buff_size, yuv_frame)) {
+				_frame->pkt_dts = yuv_frame.pkt_dts;
+				_frame->pkt_dts = yuv_frame.pkt_dts;
+				_frame->pts = yuv_frame.pts;
+
+				if ((error = encode(_frame, packet)) != AE_NO) {
+					if (_on_error) 
+						_on_error(error);
+
+					al_fatal("encode 264 packet failed:%d", error);
+
+					break;
+				}
+			}
+			
+			_cond_notify = false;
+		}
+
+		//flush frame in encoder
+		encode(NULL, packet);
+
+		av_packet_free(&packet);
+	}
+
+}

+ 39 - 0
libs/Recorder/encoder_video_x264.h

@@ -0,0 +1,39 @@
+#ifndef ENCODER_VIDEO_X264
+#define ENCODER_VIDEO_X264
+
+#include "encoder_video.h"
+
+namespace am {
	// Software h264 encoder using ffmpeg's built-in x264 wrapper.
	// Threading and queueing behaviour come from encoder_video.
	class encoder_video_x264 :
		public encoder_video
	{
	public:
		encoder_video_x264();
		~encoder_video_x264();

		// See encoder_video::init; opens the x264 codec context.
		int init(int pic_width, int pic_height, int frame_rate, int bit_rate, int qb, int key_pic_sec = 2);

		// SPS/PPS extradata; valid after init().
		int get_extradata_size();
		const uint8_t* get_extradata();

		AVCodecID get_codec_id();

	protected:
		void cleanup();
		void encode_loop();

	private:
		// send one frame (NULL flushes) and drain all resulting packets
		int encode(AVFrame *frame, AVPacket *packet);

	private:
		AVCodec *_encoder;
		AVCodecContext *_encoder_ctx;
		AVFrame *_frame;  // reusable frame whose planes point into _buff
		uint8_t *_buff;   // pixel buffer filled from the ring buffer
		int _buff_size;
		int _y_size;      // width * height (size of the Y plane)
	};
+}
+
+
+#endif

+ 218 - 0
libs/Recorder/error_define.h

@@ -0,0 +1,218 @@
#ifndef ERROR_DEFINE
#define ERROR_DEFINE

// Unified error codes for the recorder library. Every AM_ERROR value must
// have a description at the same index in ERRORS_STR below; the
// static_assert after the table enforces this.

enum AM_ERROR{
	AE_NO = 0,
	AE_ERROR,
	AE_UNSUPPORT,
	AE_INVALID_CONTEXT,
	AE_NEED_INIT,
	AE_TIMEOUT,
	AE_ALLOCATE_FAILED,

	//AE_CO_
	AE_CO_INITED_FAILED,
	AE_CO_CREATE_FAILED,
	AE_CO_GETENDPOINT_FAILED,
	AE_CO_ACTIVE_DEVICE_FAILED,
	AE_CO_GET_FORMAT_FAILED,
	AE_CO_AUDIOCLIENT_INIT_FAILED,
	AE_CO_GET_CAPTURE_FAILED,
	AE_CO_CREATE_EVENT_FAILED,
	AE_CO_SET_EVENT_FAILED,
	AE_CO_START_FAILED,
	AE_CO_ENUMENDPOINT_FAILED,
	AE_CO_GET_ENDPOINT_COUNT_FAILED,
	AE_CO_GET_ENDPOINT_ID_FAILED,
	AE_CO_OPEN_PROPERTY_FAILED,
	AE_CO_GET_VALUE_FAILED,
	AE_CO_GET_BUFFER_FAILED,
	AE_CO_RELEASE_BUFFER_FAILED,
	AE_CO_GET_PACKET_FAILED,
	AE_CO_PADDING_UNEXPECTED,

	//AE_FFMPEG_
	AE_FFMPEG_OPEN_INPUT_FAILED,
	AE_FFMPEG_FIND_STREAM_FAILED,
	AE_FFMPEG_FIND_DECODER_FAILED,
	AE_FFMPEG_OPEN_CODEC_FAILED,
	AE_FFMPEG_READ_FRAME_FAILED,
	AE_FFMPEG_READ_PACKET_FAILED,
	AE_FFMPEG_DECODE_FRAME_FAILED,
	AE_FFMPEG_NEW_SWSCALE_FAILED,
	AE_FFMPEG_FIND_ENCODER_FAILED,
	AE_FFMPEG_ALLOC_CONTEXT_FAILED,
	AE_FFMPEG_ENCODE_FRAME_FAILED,
	AE_FFMPEG_ALLOC_FRAME_FAILED,
	AE_FFMPEG_OPEN_IO_FAILED,
	AE_FFMPEG_CREATE_STREAM_FAILED,
	AE_FFMPEG_COPY_PARAMS_FAILED,
	AE_RESAMPLE_INIT_FAILED,
	AE_FFMPEG_NEW_STREAM_FAILED,
	AE_FFMPEG_FIND_INPUT_FMT_FAILED,
	AE_FFMPEG_WRITE_HEADER_FAILED,
	AE_FFMPEG_WRITE_TRAILER_FAILED,
	AE_FFMPEG_WRITE_FRAME_FAILED,

	//AE_FILTER_
	AE_FILTER_ALLOC_GRAPH_FAILED,
	AE_FILTER_CREATE_FILTER_FAILED,
	AE_FILTER_PARSE_PTR_FAILED,
	AE_FILTER_CONFIG_FAILED,
	AE_FILTER_INVALID_CTX_INDEX,
	AE_FILTER_ADD_FRAME_FAILED,

	//AE_GDI_
	AE_GDI_GET_DC_FAILED,
	AE_GDI_CREATE_DC_FAILED,
	AE_GDI_CREATE_BMP_FAILED,
	AE_GDI_BITBLT_FAILED,
	AE_GDI_GET_DIBITS_FAILED,

	//AE_D3D_
	AE_D3D_LOAD_FAILED,
	AE_D3D_GET_PROC_FAILED,
	AE_D3D_CREATE_DEVICE_FAILED,
	AE_D3D_QUERYINTERFACE_FAILED,
	AE_D3D_CREATE_VERTEX_SHADER_FAILED,
	AE_D3D_CREATE_INLAYOUT_FAILED,
	AE_D3D_CREATE_PIXEL_SHADER_FAILED,
	AE_D3D_CREATE_SAMPLERSTATE_FAILED,

	//AE_DXGI_
	AE_DXGI_GET_PROC_FAILED,
	AE_DXGI_GET_ADAPTER_FAILED,
	AE_DXGI_GET_FACTORY_FAILED,
	AE_DXGI_FOUND_ADAPTER_FAILED,

	//AE_DUP_
	AE_DUP_ATTATCH_FAILED,
	AE_DUP_QI_FAILED,
	AE_DUP_GET_PARENT_FAILED,
	AE_DUP_ENUM_OUTPUT_FAILED,
	AE_DUP_DUPLICATE_MAX_FAILED,
	AE_DUP_DUPLICATE_FAILED,
	AE_DUP_RELEASE_FRAME_FAILED,
	AE_DUP_ACQUIRE_FRAME_FAILED,
	AE_DUP_QI_FRAME_FAILED,
	AE_DUP_CREATE_TEXTURE_FAILED,
	AE_DUP_QI_DXGI_FAILED,
	AE_DUP_MAP_FAILED,
	AE_DUP_GET_CURSORSHAPE_FAILED,

	//AE_REMUX_
	AE_REMUX_RUNNING,
	AE_REMUX_NOT_EXIST,
	AE_REMUX_INVALID_INOUT,

	//AE_WGC_
	AE_WGC_CREATE_CAPTURER_FAILED,

	AE_MAX
};

static const char *ERRORS_STR[] = {
	"no error",                         //AE_NO
	"error",                            //AE_ERROR
	"not support for now",              //AE_UNSUPPORT
	"invalid context",                  //AE_INVALID_CONTEXT
	"need init first",                  //AE_NEED_INIT
	"operation timeout",                //AE_TIMEOUT
	"allocate memory failed",           //AE_ALLOCATE_FAILED,

	"com init failed",                  //AE_CO_INITED_FAILED
	"com create instance failed",       //AE_CO_CREATE_FAILED
	"com get endpoint failed",          //AE_CO_GETENDPOINT_FAILED
	"com active device failed",         //AE_CO_ACTIVE_DEVICE_FAILED
	"com get wave formatex failed",     //AE_CO_GET_FORMAT_FAILED
	"com audio client init failed",     //AE_CO_AUDIOCLIENT_INIT_FAILED
	"com audio get capture failed",     //AE_CO_GET_CAPTURE_FAILED
	"com audio create event failed",    //AE_CO_CREATE_EVENT_FAILED
	"com set ready event failed",       //AE_CO_SET_EVENT_FAILED
	"com start to record failed",       //AE_CO_START_FAILED
	"com enum audio endpoints failed",  //AE_CO_ENUMENDPOINT_FAILED
	"com get endpoints count failed",   //AE_CO_GET_ENDPOINT_COUNT_FAILED
	"com get endpoint id failed",       //AE_CO_GET_ENDPOINT_ID_FAILED
	"com open endpoint property failed", //AE_CO_OPEN_PROPERTY_FAILED
	"com get property value failed",    //AE_CO_GET_VALUE_FAILED
	"com get buffer failed",            //AE_CO_GET_BUFFER_FAILED
	"com release buffer failed",        //AE_CO_RELEASE_BUFFER_FAILED
	"com get packet size failed",       //AE_CO_GET_PACKET_FAILED
	"com get padding size unexpected",  //AE_CO_PADDING_UNEXPECTED

	"ffmpeg open input failed",         //AE_FFMPEG_OPEN_INPUT_FAILED
	"ffmpeg find stream info failed",   //AE_FFMPEG_FIND_STREAM_FAILED
	"ffmpeg find decoder failed",       //AE_FFMPEG_FIND_DECODER_FAILED
	"ffmpeg open codec failed",         //AE_FFMPEG_OPEN_CODEC_FAILED
	"ffmpeg read frame failed",         //AE_FFMPEG_READ_FRAME_FAILED
	"ffmpeg read packet failed",        //AE_FFMPEG_READ_PACKET_FAILED
	"ffmpeg decode frame failed",       //AE_FFMPEG_DECODE_FRAME_FAILED
	"ffmpeg create swscale failed",     //AE_FFMPEG_NEW_SWSCALE_FAILED

	"ffmpeg find encoder failed",       //AE_FFMPEG_FIND_ENCODER_FAILED
	"ffmpeg alloc context failed",      //AE_FFMPEG_ALLOC_CONTEXT_FAILED
	"ffmpeg encode frame failed",       //AE_FFMPEG_ENCODE_FRAME_FAILED
	"ffmpeg alloc frame failed",        //AE_FFMPEG_ALLOC_FRAME_FAILED
	
	"ffmpeg open io ctx failed",        //AE_FFMPEG_OPEN_IO_FAILED
	"ffmpeg new stream failed",         //AE_FFMPEG_CREATE_STREAM_FAILED
	"ffmpeg copy parameters failed",    //AE_FFMPEG_COPY_PARAMS_FAILED
	"resampler init failed",            //AE_RESAMPLE_INIT_FAILED
	"ffmpeg new out stream failed",     //AE_FFMPEG_NEW_STREAM_FAILED
	"ffmpeg find input format failed",  //AE_FFMPEG_FIND_INPUT_FMT_FAILED
	"ffmpeg write file header failed",  //AE_FFMPEG_WRITE_HEADER_FAILED
	"ffmpeg write file trailer failed", //AE_FFMPEG_WRITE_TRAILER_FAILED
	"ffmpeg write frame failed",        //AE_FFMPEG_WRITE_FRAME_FAILED

	"avfilter alloc avfilter failed",   //AE_FILTER_ALLOC_GRAPH_FAILED
	"avfilter create graph failed",     //AE_FILTER_CREATE_FILTER_FAILED
	"avfilter parse ptr failed",        //AE_FILTER_PARSE_PTR_FAILED
	"avfilter config graph failed",     //AE_FILTER_CONFIG_FAILED
	"avfilter invalid ctx index",       //AE_FILTER_INVALID_CTX_INDEX
	"avfilter add frame failed",        //AE_FILTER_ADD_FRAME_FAILED

	"gdi get dc failed",                //AE_GDI_GET_DC_FAILED
	"gdi create dc failed",             //AE_GDI_CREATE_DC_FAILED
	"gdi create bmp failed",            //AE_GDI_CREATE_BMP_FAILED
	"gdi bitblt failed",                //AE_GDI_BITBLT_FAILED
	"gid geet dibbits failed",          //AE_GDI_GET_DIBITS_FAILED

	"d3d11 library load failed",        //AE_D3D_LOAD_FAILED
	"d3d11 proc get failed",            //AE_D3D_GET_PROC_FAILED
	"d3d11 create device failed",       //AE_D3D_CREATE_DEVICE_FAILED
	"d3d11 query interface failed",     //AE_D3D_QUERYINTERFACE_FAILED
	"d3d11 create vertex shader failed",//AE_D3D_CREATE_VERTEX_SHADER_FAILED
	"d3d11 create input layout failed", //AE_D3D_CREATE_INLAYOUT_FAILED
	"d3d11 create pixel shader failed", //AE_D3D_CREATE_PIXEL_SHADER_FAILED
	"d3d11 create sampler state failed",//AE_D3D_CREATE_SAMPLERSTATE_FAILED

	"dxgi get proc address failed",     //AE_DXGI_GET_PROC_FAILED
	"dxgi get adapter failed",          //AE_DXGI_GET_ADAPTER_FAILED
	"dxgi get factory failed",          //AE_DXGI_GET_FACTORY_FAILED
	"dxgi specified adapter not found", //AE_DXGI_FOUND_ADAPTER_FAILED

	"duplication attatch desktop failed", //AE_DUP_ATTATCH_FAILED
	"duplication query interface failed", //AE_DUP_QI_FAILED
	"duplication get parent failed",      //AE_DUP_GET_PARENT_FAILED
	"duplication enum ouput failed",      //AE_DUP_ENUM_OUTPUT_FAILED
	"duplication duplicate unavailable",  //AE_DUP_DUPLICATE_MAX_FAILED
	"duplication duplicate failed",       //AE_DUP_DUPLICATE_FAILED
	"duplication release frame failed",   //AE_DUP_RELEASE_FRAME_FAILED
	"duplication acquire frame failed",   //AE_DUP_ACQUIRE_FRAME_FAILED
	"duplication qi frame failed",        //AE_DUP_QI_FRAME_FAILED
	"duplication create texture failed",  //AE_DUP_CREATE_TEXTURE_FAILED
	"duplication dxgi qi failed",         //AE_DUP_QI_DXGI_FAILED
	"duplication map rects failed",       //AE_DUP_MAP_FAILED
	"duplication get cursor shape failed",//AE_DUP_GET_CURSORSHAPE_FAILED

	"remux is already running",           //AE_REMUX_RUNNING
	"remux input file do not exist",      //AE_REMUX_NOT_EXIST
	"remux input or output file invalid", //AE_REMUX_INVALID_INOUT

	// fixed: this entry was missing, so err2str(AE_WGC_CREATE_CAPTURER_FAILED)
	// read past the end of the array
	"wgc create capturer failed",         //AE_WGC_CREATE_CAPTURER_FAILED
};

// Keep the table and the enum in lock step.
static_assert(sizeof(ERRORS_STR) / sizeof(ERRORS_STR[0]) == AE_MAX,
	"ERRORS_STR must have exactly one entry per AM_ERROR value");

// Map an AM_ERROR to its description; out-of-range values (including
// negatives) yield "unknown". Fixed: the argument is now parenthesized and
// the lower bound is checked too.
#define err2str(e) (((e) >= AE_NO && (e) < AE_MAX) ? ERRORS_STR[(e)] : "unknown")

// Early-return helper; the do/while wrapper keeps it safe inside
// unbraced if/else bodies.
#define AMERROR_CHECK(err) do { if ((err) != AE_NO) return (err); } while (0)

#endif // !ERROR_DEFINE

+ 501 - 0
libs/Recorder/export.cpp

@@ -0,0 +1,501 @@
+#include "export.h"
+
+#include "device_audios.h"
+#include "encoder_video_define.h"
+
+#include "record_audio_factory.h"
+#include "record_desktop_factory.h"
+
+#include "muxer_define.h"
+#include "muxer_ffmpeg.h"
+
+#include "remuxer_ffmpeg.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+#include "utils_string.h"
+
+#ifdef _WIN32
+#include "system_version.h"
+#endif
+
+#include <string>
+#include <atomic>
+#include <mutex>
+
+#define USE_DSHOW 0
+
+namespace am {
+typedef std::lock_guard<std::mutex> lock_guard;
+
+// Downscale factors tried in order by get_valid_out_resolution() until the
+// output area fits within 1920x1080; the trailing 0.0 is a terminator sentinel.
+static const double scaled_vals[] = { 1.0,         1.25, (1.0 / 0.75), 1.5,
+	(1.0 / 0.6), 1.75, 2.0,          2.25,
+	2.5,         2.75, 3.0,          0.0 };
+
+// Facade tying together desktop capture, speaker/mic capture and the ffmpeg
+// muxer. Used as a process-wide singleton; exposed to C callers through the
+// exported recorder_* functions below.
+class recorder {
+private:
+	recorder();
+
+	~recorder();
+
+public:
+	// Lazily-created process-wide singleton (thread-safe via _g_mutex).
+	static recorder *instance();
+
+	// Destroy the singleton, releasing all capture/mux resources.
+	static void release();
+
+	// Build capture sources and the muxer from the settings.
+	// Returns AE_NO on success, an AE_* code otherwise.
+	int init(const AMRECORDER_SETTING & setting, const AMRECORDER_CALLBACK &callbacks);
+
+	int start();
+
+	void stop();
+
+	int pause();
+
+	int resume();
+
+	// Toggle delivery of preview YUV frames to the registered callback.
+	void set_preview_enabled(bool enable);
+
+private:
+	// Forwards muxer preview frames to the host-supplied callback.
+	void on_preview_yuv(const uint8_t *data, int size, int width, int height, int type);
+	// Picks an even-sized output resolution capped near 1080p (see scaled_vals).
+	void get_valid_out_resolution(int src_width, int src_height, int *out_width, int *out_height);
+private:
+	AMRECORDER_SETTING _setting;
+	AMRECORDER_CALLBACK _callbacks;
+
+	// Capture sources; any of these may stay nullptr (e.g. no mic configured).
+	record_audio *_recorder_speaker;
+	record_audio *_recorder_mic;
+	record_desktop *_recorder_desktop;
+
+	muxer_file *_muxer;
+
+	std::atomic_bool _inited;
+	std::mutex _mutex;
+};
+
+// Singleton storage; _g_mutex guards creation/destruction in instance()/release().
+static recorder *_g_instance = nullptr;
+static std::mutex _g_mutex;
+
+// Zero all settings/callbacks and null every owned resource; real setup
+// happens in init().
+recorder::recorder() {
+	memset(&_setting, 0, sizeof(_setting));
+	memset(&_callbacks, 0, sizeof(_callbacks));
+
+	_recorder_speaker = nullptr;
+	_recorder_mic = nullptr;
+	_recorder_desktop = nullptr;
+
+	_inited = false;
+	_muxer = nullptr;
+}
+
+// Tear down in reverse order of creation: muxer first (it consumes the
+// capture sources), then the capture objects.
+recorder::~recorder() {
+	if (_muxer)
+		delete _muxer;
+
+	if (_recorder_desktop)
+		delete _recorder_desktop;
+
+	if (_recorder_mic)
+		delete _recorder_mic;
+
+	if (_recorder_speaker)
+		delete _recorder_speaker;
+}
+
+// Lazily create and return the process-wide singleton.
+recorder * recorder::instance() {
+	lock_guard lock(_g_mutex);
+
+	if (_g_instance == nullptr) _g_instance = new recorder();
+
+	return _g_instance;
+}
+
+// Destroy the singleton; safe to call when it was never created.
+// NOTE(review): a later instance() call will transparently create a fresh
+// instance — confirm this re-creation is intended.
+void recorder::release()
+{
+	lock_guard lock(_g_mutex);
+
+	if (_g_instance)
+		delete _g_instance;
+
+	_g_instance = nullptr;
+}
+
+// Build the whole capture pipeline from `setting`:
+//   1. optional speaker + mic capture (WASAPI, or DirectShow when USE_DSHOW),
+//   2. desktop capture, trying backends in order MAG -> WGC -> Duplication -> GDI,
+//   3. the ffmpeg muxer that encodes and writes setting.output.
+// Returns AE_NO on success; on failure AMERROR_CHECK returns early and
+// already-created sub-objects are released by the destructor.
+int recorder::init(const AMRECORDER_SETTING & setting, const AMRECORDER_CALLBACK & callbacks)
+{
+	lock_guard lock(_mutex);
+	if (_inited == true)
+		return AE_NO;
+
+	int error = AE_NO;
+	int audio_num = 0;
+
+	_setting = setting;
+	_callbacks = callbacks;
+
+	am::record_audio *audios[2] = { 0 };
+
+#if USE_DSHOW
+
+	error = record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_DSHOW, &_recorder_speaker);
+	AMERROR_CHECK(error);
+
+	error = _recorder_speaker->init("audio=virtual-audio-capturer", "audio=virtual-audio-capturer", false);
+	AMERROR_CHECK(error);
+
+	error = record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_DSHOW, &_recorder_mic);
+	AMERROR_CHECK(error);
+
+	error = _recorder_mic->init(std::string("audio=") + std::string(setting.a_mic.name), std::string("audio=") + std::string(setting.a_mic.name), true);
+	AMERROR_CHECK(error);
+
+	audios = { _recorder_speaker,_recorder_mic };
+#else
+	// Speaker capture is optional: only create it when both a device name
+	// and id were supplied.
+	if (utils_string::utf8_ascii(setting.a_speaker.name).length() && utils_string::utf8_ascii(setting.a_speaker.id).length()) {
+		error = record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_WAS, &_recorder_speaker);
+		AMERROR_CHECK(error);
+
+		error = _recorder_speaker->init(setting.a_speaker.name, setting.a_speaker.id, false);
+		AMERROR_CHECK(error);
+
+		audios[audio_num] = _recorder_speaker;
+		audio_num++;
+	}
+
+
+
+	// Microphone capture is likewise optional (last init argument true = input).
+	if (utils_string::utf8_ascii(setting.a_mic.name).length() && utils_string::utf8_ascii(setting.a_mic.id).length()) {
+		error = record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_WAS, &_recorder_mic);
+		AMERROR_CHECK(error);
+
+		error = _recorder_mic->init(setting.a_mic.name, setting.a_mic.id, true);
+		AMERROR_CHECK(error);
+
+		audios[audio_num] = _recorder_mic;
+		audio_num++;
+	}
+#endif 
+
+#ifdef _WIN32
+#if USE_MAG
+	// Desktop backend fallback chain: each attempt destroys its half-built
+	// recorder on failure so the next backend can be tried.
+  if (_recorder_desktop == nullptr) {
+                error =
+                    record_desktop_new(RECORD_DESKTOP_TYPES::DT_DESKTOP_WIN_MAG,
+                                  &_recorder_desktop);
+    if (error == AE_NO) {
+      error = _recorder_desktop->init(
+          {setting.v_left, setting.v_top,
+            setting.v_width + setting.v_left,
+            setting.v_height + setting.v_top},
+          setting.v_frame_rate);
+
+      if (error != AE_NO)
+        record_desktop_destroy(&_recorder_desktop);
+    }
+  }
+#endif
+
+	// windows support wgc since win10.1803
+  if (_recorder_desktop == nullptr && system_version::is_win10_or_above(17134)) {
+    error = record_desktop_new(RECORD_DESKTOP_TYPES::DT_DESKTOP_WIN_WGC,
+        &_recorder_desktop);
+    if (error == AE_NO) {
+      error = _recorder_desktop->init({setting.v_left, setting.v_top,
+                                        setting.v_width + setting.v_left,
+                                        setting.v_height + setting.v_top},
+                                      setting.v_frame_rate);
+
+      if (error != AE_NO)
+        record_desktop_destroy(&_recorder_desktop);
+    }
+	}
+
+	// DXGI Desktop Duplication requires Windows 8 or newer.
+  if (_recorder_desktop == nullptr &&
+      system_version::is_win8_or_above()) {
+		error = record_desktop_new(RECORD_DESKTOP_TYPES::DT_DESKTOP_WIN_DUPLICATION, &_recorder_desktop);
+		if (error == AE_NO) {
+
+			error = _recorder_desktop->init(
+			{
+				setting.v_left,setting.v_top,setting.v_width + setting.v_left,setting.v_height + setting.v_top
+			},
+				setting.v_frame_rate
+			);
+
+			if (error != AE_NO)
+				record_desktop_destroy(&_recorder_desktop);
+		}
+	}
+
+	// GDI capture is the last-resort backend; here a failure aborts init.
+	if (_recorder_desktop == nullptr) {
+		error = record_desktop_new(RECORD_DESKTOP_TYPES::DT_DESKTOP_WIN_GDI, &_recorder_desktop);
+		AMERROR_CHECK(error);
+
+		error = _recorder_desktop->init(
+		{
+			setting.v_left,setting.v_top,setting.v_width + setting.v_left,setting.v_height + setting.v_top
+		},
+			setting.v_frame_rate
+		);
+
+		AMERROR_CHECK(error);
+	}
+#endif // _WIN32
+
+	// Fixed audio output format: stereo, planar float, 44.1 kHz, 128 kbps.
+	am::MUX_SETTING mux_setting;
+	mux_setting.v_frame_rate = setting.v_frame_rate;
+	mux_setting.v_bit_rate = setting.v_bit_rate;
+	mux_setting.v_width = setting.v_width;
+	mux_setting.v_height = setting.v_height;
+	mux_setting.v_qb = setting.v_qb;
+	mux_setting.v_encoder_id = (am::ENCODER_VIDEO_ID)setting.v_enc_id;
+
+	get_valid_out_resolution(setting.v_width, setting.v_height, &mux_setting.v_out_width, &mux_setting.v_out_height);
+
+	mux_setting.a_nb_channel = 2;
+	mux_setting.a_sample_fmt = AV_SAMPLE_FMT_FLTP;
+	mux_setting.a_sample_rate = 44100;
+	mux_setting.a_bit_rate = 128000;
+
+
+
+	_muxer = new muxer_ffmpeg();
+
+	// Route preview frames from the muxer back to the host callback.
+	_muxer->registe_yuv_data(std::bind(
+		&recorder::on_preview_yuv,
+		this,
+		std::placeholders::_1,
+		std::placeholders::_2,
+		std::placeholders::_3,
+		std::placeholders::_4,
+		std::placeholders::_5
+	));
+
+	error = _muxer->init(setting.output, _recorder_desktop, audios, audio_num, mux_setting);
+	AMERROR_CHECK(error);
+
+	_inited = true;
+
+	return error;
+}
+
+// Begin capturing/encoding; requires a successful init() first.
+int recorder::start()
+{
+	lock_guard lock(_mutex);
+	if (_inited == false)
+		return AE_NEED_INIT;
+
+	int error = _muxer->start();
+
+	return error;
+}
+
+// Stop capturing/encoding. No-op when init() has not succeeded.
+// Note: _inited stays true, so start() can be called again on the same muxer.
+void recorder::stop()
+{
+	lock_guard lock(_mutex);
+	if (_inited == false)
+		return;
+
+	_muxer->stop();
+}
+
+// Pause the running recording; AE_NEED_INIT when not initialized.
+int recorder::pause()
+{
+	lock_guard lock(_mutex);
+	if (_inited == false)
+		return AE_NEED_INIT;
+
+	return _muxer->pause();
+}
+
+// Resume a paused recording; AE_NEED_INIT when not initialized.
+int recorder::resume()
+{
+	lock_guard lock(_mutex);
+	if (_inited == false)
+		return AE_NEED_INIT;
+
+	return _muxer->resume();
+}
+
+// Enable/disable preview frame delivery; silently ignored before init().
+void recorder::set_preview_enabled(bool enable)
+{
+	lock_guard lock(_mutex);
+	if (_inited == false)
+		return;
+
+	_muxer->set_preview_enabled(enable);
+}
+
+// Forward a preview YUV frame from the muxer to the host callback, if set.
+void recorder::on_preview_yuv(const uint8_t * data, int size, int width, int height, int type)
+{
+	if (_callbacks.func_preview_yuv != NULL)
+		_callbacks.func_preview_yuv(data, size, width, height, type);
+}
+// Pick an output resolution no larger than ~1080p by walking the scaled_vals
+// table, then round both dimensions up to even values (encoder requirement).
+// @param src_width/src_height  capture size
+// @param out_width/out_height  receives the chosen output size
+void recorder::get_valid_out_resolution(int src_width, int src_height, int * out_width, int * out_height)
+{
+	int scale_cx = src_width;
+	int scale_cy = src_height;
+
+	// Try successively larger downscale factors until the area fits within
+	// 1920x1080 or the table's 0.0 sentinel is reached.
+	double scale = 1.0;
+	int i = 0;
+
+	while (((scale_cx * scale_cy) > (1920 * 1080)) && scaled_vals[i] > 0.0) {
+		scale = scaled_vals[i++];
+		scale_cx = uint32_t(double(src_width) / scale);
+		scale_cy = uint32_t(double(src_height) / scale);
+	}
+
+	if (scale_cx % 2 != 0) {
+		scale_cx += 1;
+	}
+
+	if (scale_cy % 2 != 0) {
+		scale_cy += 1;
+	}
+
+	*out_width = scale_cx;
+	*out_height = scale_cy;
+
+	// Bug fix: previously logged scaled_vals[i], which after the loop is one
+	// entry PAST the factor actually applied (or the 0.0 sentinel); log the
+	// real factor used instead.
+	al_info("get valid output resolution from %dx%d to %dx%d,with scale:%lf", src_width, src_height, scale_cx, scale_cy, scale);
+}
+}
+
+AMRECORDER_API const char * recorder_err2str(int error)
+{
+	return am::utils_string::ascii_utf8(err2str(error)).c_str();
+}
+
+// C ABI wrapper: forward to the singleton's init().
+AMRECORDER_API int recorder_init(const AMRECORDER_SETTING & setting, const AMRECORDER_CALLBACK & callbacks)
+{
+	return am::recorder::instance()->init(setting, callbacks);
+}
+
+// C ABI wrapper: destroy the singleton.
+// NOTE(review): instance() re-creates the singleton if it was already
+// released, only for release() to delete it again — harmless but wasteful;
+// confirm whether calling recorder::release() directly was intended.
+AMRECORDER_API void recorder_release()
+{
+	return am::recorder::instance()->release();
+}
+
+// C ABI wrapper: forward to the singleton's start().
+AMRECORDER_API int recorder_start()
+{
+	return am::recorder::instance()->start();
+}
+
+// C ABI wrapper: forward to the singleton's stop().
+AMRECORDER_API void recorder_stop()
+{
+	return am::recorder::instance()->stop();
+}
+
+// C ABI wrapper: forward to the singleton's pause().
+AMRECORDER_API int recorder_pause()
+{
+	return am::recorder::instance()->pause();
+}
+
+// C ABI wrapper: forward to the singleton's resume().
+AMRECORDER_API int recorder_resume()
+{
+	return am::recorder::instance()->resume();
+}
+
+// Enumerate speaker (audio OUTPUT) devices.
+// On success allocates *devices (free with recorder_free_array) and returns
+// the count; on failure returns the negated AE_* error code.
+AMRECORDER_API int recorder_get_speakers(AMRECORDER_DEVICE ** devices)
+{
+	std::list<am::DEVICE_AUDIOS> device_list;
+
+	int error = am::device_audios::get_output_devices(device_list);
+	if (error != AE_NO) return -error;
+
+	int count = (int)device_list.size();
+
+	*devices = new AMRECORDER_DEVICE[count];
+
+	int index = 0;
+	// Standard range-for instead of the MSVC-only "for each" extension.
+	for (const auto &device : device_list)
+	{
+		// Bug fix: these are OUTPUT devices; the log previously said "input".
+		al_info("audio output name:%s id:%s", device.name.c_str(), device.id.c_str());
+
+		(*devices)[index].is_default = device.is_default;
+		sprintf_s((*devices)[index].id, 260, "%s", device.id.c_str());
+		sprintf_s((*devices)[index].name, 260, "%s", device.name.c_str());
+
+		index++;
+	}
+
+	return count;
+}
+
+// Enumerate microphone (audio INPUT) devices.
+// On success allocates *devices (free with recorder_free_array) and returns
+// the count; on failure returns the negated AE_* error code.
+AMRECORDER_API int recorder_get_mics(AMRECORDER_DEVICE ** devices)
+{
+	std::list<am::DEVICE_AUDIOS> device_list;
+
+	int error = am::device_audios::get_input_devices(device_list);
+	if (error != AE_NO) return -error;
+
+	int count = (int)device_list.size();
+
+	*devices = new AMRECORDER_DEVICE[count];
+
+	int index = 0;
+	// Standard range-for instead of the MSVC-only "for each" extension.
+	for (const auto &device : device_list)
+	{
+		// Bug fix: these are INPUT devices; the log previously said "output".
+		al_info("audio input name:%s id:%s", device.name.c_str(), device.id.c_str());
+
+		(*devices)[index].is_default = device.is_default;
+		sprintf_s((*devices)[index].id, 260, "%s", device.id.c_str());
+		sprintf_s((*devices)[index].name, 260, "%s", device.name.c_str());
+
+		index++;
+	}
+
+	return count;
+}
+
+// Camera enumeration is not implemented; always returns -AE_UNSUPPORT.
+AMRECORDER_API int recorder_get_cameras(AMRECORDER_DEVICE ** devices)
+{
+	return -AE_UNSUPPORT;
+}
+
+// Enumerate available video encoders: software x264 first, then any
+// hardware encoders reported by hardware_acceleration.
+// Allocates *encoders (free with recorder_free_array); returns the count.
+AMRECORDER_API int recorder_get_vencoders(AMRECORDER_ENCODERS ** encoders)
+{
+	auto hw_encoders = am::hardware_acceleration::get_supported_video_encoders();
+
+	int count = (int)hw_encoders.size() + 1;
+	*encoders = new AMRECORDER_ENCODERS[count];
+
+	AMRECORDER_ENCODERS *ptr = *encoders;
+	ptr->id = am::EID_VIDEO_X264;
+	// Fix: never pass a runtime string as the sprintf_s FORMAT argument;
+	// route it through "%s" instead.
+	sprintf_s(ptr->name, 260, "%s", am::utils_string::ascii_utf8("Soft.X264").c_str());
+
+	// Standard range-for instead of the MSVC-only "for each" extension.
+	for (const auto &hw_encoder : hw_encoders)
+	{
+		ptr++;
+		ptr->id = hw_encoder.type;
+		sprintf_s(ptr->name, 260, "%s", hw_encoder.name);
+	}
+
+	return count;
+}
+
+// Free an array allocated by recorder_get_* functions.
+// NOTE(review): delete[] through void* is technically undefined behavior;
+// it works here only because the element types are trivially destructible
+// PODs — consider typed free functions instead.
+AMRECORDER_API void recorder_free_array(void * array_address)
+{
+	if (array_address != nullptr)
+		delete[]array_address;
+}
+
+// Queue an asynchronous remux job from src to dst (paths converted from
+// UTF-8 to ASCII); progress/state are reported through the two callbacks.
+AMRECORDER_API int recorder_remux(const char * src, const char * dst, AMRECORDER_FUNC_REMUX_PROGRESS func_progress, AMRECORDER_FUNC_REMUX_STATE func_state)
+{
+	am::REMUXER_PARAM param = { 0 };
+
+	sprintf_s(param.src, 260, "%s", am::utils_string::utf8_ascii(src).c_str());
+	sprintf_s(param.dst, 260, "%s", am::utils_string::utf8_ascii(dst).c_str());
+
+	param.cb_progress = func_progress;
+
+	param.cb_state = func_state;
+
+	return am::remuxer_ffmpeg::instance()->create_remux(param);
+}
+
+// C ABI wrapper: 1 enables preview frame delivery, any other value disables it.
+AMRECORDER_API void recorder_set_preview_enabled(int enable)
+{
+	am::recorder::instance()->set_preview_enabled(enable == 1);
+}
+
+// Initialize the logging singleton with the given file path.
+// Fix: the returned handle was stored in an unused local; the call alone
+// is what initializes the logger.
+AMRECORDER_API void recorder_set_logpath(const char * path)
+{
+	AMLog::get(path);
+}

+ 295 - 0
libs/Recorder/export.h

@@ -0,0 +1,295 @@
+#ifndef RECORDER_EXPORT
+#define RECORDER_EXPORT
+
+#include <stdint.h>
+
+#ifdef AMRECORDER_IMPORT
+#define AMRECORDER_API extern "C"  __declspec(dllimport)
+#else
+#define AMRECORDER_API extern "C"  __declspec(dllexport)
+#endif
+
+
+/**
+* AMRECORDER_DEVICE
+*/
+#pragma pack(push,1)
+typedef struct {
+	/**
+	* Device id in utf8
+	*/
+	char id[260];
+
+	/**
+	* Device name in utf8
+	*/
+	char name[260];
+
+	/**
+	* Is default device
+	*/
+	uint8_t is_default;
+}AMRECORDER_DEVICE;
+#pragma pack(pop)
+
+/**
+* AMRECORDER_SETTING
+*/
+#pragma pack(push,1)
+typedef struct {
+
+	/**
+	* Left of desktop area
+	*/
+	int v_left;
+
+	/**
+	* Top of desktop area
+	*/
+	int v_top;
+
+	/**
+	* Width of desktop area
+	*/
+	int v_width;
+
+	/**
+	* Height of desktop area
+	*/
+	int v_height;
+
+	/**
+	* Output video quality, value must be between 0 and 100, 0 is least, 100 is best
+	*/
+	int v_qb;
+
+	/**
+	* Output video bitrate, the larger value you set,
+	* the better video quality you get, but the file size is also larger.
+	* Suggestion: 960|1280|2500 *1000
+	*/
+	int v_bit_rate;
+
+	/**
+	* FPS(frame per second)
+	*/
+	int v_frame_rate;
+
+	/**
+	* Video encoder id
+	* Must get by recorder_get_vencoders
+	*/
+	int v_enc_id;
+
+	/**
+	* Output file path,the output file format is depended on the ext name.
+	* Support .mp4|.mkv for now.
+	*/
+	char output[260];
+
+	/**
+	* Desktop device
+	* Unused
+	*/
+	AMRECORDER_DEVICE v_device;
+
+	/**
+	* Microphone device info
+	*/
+	AMRECORDER_DEVICE a_mic;
+
+	/**
+	* Speaker device info
+	*/
+	AMRECORDER_DEVICE a_speaker;
+}AMRECORDER_SETTING;
+#pragma pack(pop)
+
+/**
+* AMRECORDER_ENCODERS
+*/
+#pragma pack(push,1)
+typedef struct {
+
+	/**
+	* Encoder id
+	*/
+	int id;
+
+	/**
+	* Encoder name
+	*/
+	char name[260];
+}AMRECORDER_ENCODERS;
+#pragma pack(pop)
+
+/**
+* Recording duration callback function
+* @param[in] duration time in millisecond
+*/
+typedef void(*AMRECORDER_FUNC_DURATION)(uint64_t duration);
+
+/**
+* Recording error callback function
+* Should call recorder_err2str to get stringify error info
+* @param[in] error
+*/
+typedef void(*AMRECORDER_FUNC_ERROR)(int error);
+
+/**
+* Device changed callback function
+* Should refresh devices
+* @param[in] type 0 for video, 1 for speaker, 2 for microphone
+*/
+typedef void(*AMRECORDER_FUNC_DEVICE_CHANGE)(int type);
+
+/**
+* YUV data callback function
+* Delivers a preview video frame to the host application
+* @param[in] data   yuv buffer
+* @param[in] size   yuv buffer size
+* @param[in] width  picture width
+* @param[in] height picture height
+* @param[in] type   yuv type, 0 for 420, 1 for 444
+*/
+typedef void(*AMRECORDER_FUNC_PREVIEW_YUV)(
+	const unsigned char *data,
+	unsigned int size,
+	int width,
+	int height,
+	int type
+	);
+
+/**
+* Unused callback function
+*/
+typedef void(*AMRECORDER_FUNC_PREVIEW_AUDIO)();
+
+/**
+* Remux progress callback function
+* @param[in] path       source file path
+* @param[in] progress   remuxing progress in total
+* @param[in] total      always will be 100
+*/
+typedef void(*AMRECORDER_FUNC_REMUX_PROGRESS)(const char *path, int progress, int total);
+
+/**
+* Remux state callback function
+* @param[in] path    source file path
+* @param[in] state   0 for unremuxing,1 for remuxing
+* @param[in] error   0 for success, otherwise an error code
+*/
+typedef void(*AMRECORDER_FUNC_REMUX_STATE)(const char *path, int state, int error);
+
+/**
+* Callback functions structure
+*/
+#pragma pack(push,1)
+typedef struct {
+	AMRECORDER_FUNC_DURATION func_duration;
+	AMRECORDER_FUNC_ERROR func_error;
+	AMRECORDER_FUNC_DEVICE_CHANGE func_device_change;
+	AMRECORDER_FUNC_PREVIEW_YUV func_preview_yuv;
+	AMRECORDER_FUNC_PREVIEW_AUDIO func_preview_audio;
+}AMRECORDER_CALLBACK;
+#pragma pack(pop)
+
+/**
+* Get error string by specified error code
+* @return error string
+*/
+AMRECORDER_API const char * recorder_err2str(int error);
+
+/**
+* Initialize recorder with the specified settings: speaker, mic, encoder...
+* @return 0 if succeed,error code otherwise
+*/
+AMRECORDER_API int recorder_init(const AMRECORDER_SETTING &setting, const AMRECORDER_CALLBACK &callbacks);
+
+/**
+* Release all recorder resources
+*/
+AMRECORDER_API void recorder_release();
+
+/**
+* Start recording
+* @return 0 if succeed,error code otherwise
+*/
+AMRECORDER_API int recorder_start();
+
+/**
+* Stop recording
+*/
+AMRECORDER_API void recorder_stop();
+
+/**
+* Pause recording
+* @return 0 if succeed,error code otherwise
+*/
+AMRECORDER_API int recorder_pause();
+
+/**
+* Resume recording
+* @return 0 if succeed,error code otherwise
+*/
+AMRECORDER_API int recorder_resume();
+
+/**
+* Get valid speaker devices
+* @param[in] devices a pointer to a device array,should call recorder_free_array to free memory
+* @return count of speakers
+*/
+AMRECORDER_API int recorder_get_speakers(AMRECORDER_DEVICE **devices);
+
+/**
+* Get valid mic devices
+* @param[in] devices a pointer to a device array,should call recorder_free_array to free memory
+* @return count of mics
+*/
+AMRECORDER_API int recorder_get_mics(AMRECORDER_DEVICE **devices);
+
+/**
+* Get valid camera devices
+* @param[in] devices a pointer to a device array,should call recorder_free_array to free memory
+* @return count of cameras
+*/
+AMRECORDER_API int recorder_get_cameras(AMRECORDER_DEVICE **devices);
+
+/**
+* Get valid encoders
+* @param[in] encoders a pointer to a encoder array,should call recorder_free_array to free memory
+* @return count of encoders
+*/
+AMRECORDER_API int recorder_get_vencoders(AMRECORDER_ENCODERS **encoders);
+
+/**
+* Free memory allocate by recorder
+* @param[in] array_address the pointer of array buffer
+*/
+AMRECORDER_API void recorder_free_array(void *array_address);
+
+/**
+* Recorder create a remux job
+* @param[in] src    source file path
+* @param[in] dst   destination file path
+* @param[in] func_progress   callback invoked with remux progress
+* @param[in] func_state   callback invoked on remux state changes
+*/
+AMRECORDER_API int recorder_remux(
+	const char *src, const char *dst,
+	AMRECORDER_FUNC_REMUX_PROGRESS func_progress,
+	AMRECORDER_FUNC_REMUX_STATE func_state);
+
+/**
+* Enable or disable preview include video and audio
+* @param[in] enable 1 for enable,0 for disable
+*/
+AMRECORDER_API void recorder_set_preview_enabled(int enable);
+
+
+/**
+* Set log path
+* @param[in] path log file path
+*/
+AMRECORDER_API void recorder_set_logpath(const char* path);
+#endif

+ 13 - 0
libs/Recorder/filter.cpp

@@ -0,0 +1,13 @@
+#include "filter.h"
+
+namespace am {
+	// Format an abuffer filter-pad argument string from a FILTER_CTX
+	// (time base, sample rate/format and a default channel layout derived
+	// from the channel count). %I64x is the MSVC length modifier for int64.
+	void format_pad_arg(char *arg, int size, const FILTER_CTX &ctx)
+	{
+		sprintf_s(arg, size, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%I64x",
+			ctx.time_base.num,
+			ctx.time_base.den,
+			ctx.sample_rate,
+			av_get_sample_fmt_name(ctx.sample_fmt),
+			av_get_default_channel_layout(ctx.nb_channel));
+	}
+}

+ 44 - 0
libs/Recorder/filter.h

@@ -0,0 +1,44 @@
+#ifndef FILTER
+#define FILTER
+
+#include <thread>
+#include <atomic>
+#include <functional>
+#include <string>
+#include <mutex>
+#include <condition_variable>
+
+#include "headers_ffmpeg.h"
+
+namespace am {
+
+	// One endpoint (input pad or sink) of an audio filter graph, bundling
+	// the libavfilter objects with the audio format they carry.
+	typedef struct {
+		AVFilterContext *ctx;      // abuffer / abuffersink filter instance
+		AVFilterInOut *inout;      // graph linkage node used during parsing
+
+		AVRational time_base;      // time base of frames on this pad
+		int sample_rate;
+		AVSampleFormat sample_fmt;
+		int nb_channel;
+		int64_t channel_layout;
+	}FILTER_CTX;
+
+	/**
+	* filter data callback
+	* @param frame pointer to a AVFrame
+	* @param index resource index ,default is -1
+	*/
+	typedef std::function<void(AVFrame *frame, int index)> on_filter_data;
+
+	/**
+	* filter error callback
+	* @param code error code
+	* @param index resource index ,default is -1
+	*/
+	typedef std::function<void(int code, int index)> on_filter_error;
+
+	void format_pad_arg(char *arg, int size, const FILTER_CTX &ctx);
+}
+
+#endif // !FILTER
+

+ 268 - 0
libs/Recorder/filter_amix.cpp

@@ -0,0 +1,268 @@
+#include "filter_amix.h"
+
+#include <chrono>
+
+#include "error_define.h"
+#include "log_helper.h"
+
+namespace am {
+
+	// Debug helper: log a frame's pts and sample count for the given input index.
+	static void print_frame(const AVFrame *frame, int index)
+	{
+		al_debug("index:%d %lld %d", index, frame->pts, frame->nb_samples);
+	}
+
+	// Register ffmpeg components and zero all state; the graph itself is
+	// built in init().
+	filter_amix::filter_amix()
+	{
+		av_register_all();
+		avfilter_register_all();
+
+		memset(&_ctx_in_0, 0, sizeof(FILTER_CTX));
+		memset(&_ctx_in_1, 0, sizeof(FILTER_CTX));
+		memset(&_ctx_out, 0, sizeof(FILTER_CTX));
+
+		_filter_graph = NULL;
+
+		_inited = false;
+		_running = false;
+
+		_cond_notify = false;
+
+	}
+
+
+	// Stop the worker thread, then free the filter graph.
+	filter_amix::~filter_amix()
+	{
+		stop();
+		cleanup();
+	}
+
+	// Build an amix filter graph "[in0][in1]amix...[out]" mixing two audio
+	// inputs into one output with the format described by ctx_out.
+	// Returns AE_NO on success; on failure the partially built graph is freed.
+	int filter_amix::init(const FILTER_CTX & ctx_in0, const FILTER_CTX & ctx_in1, const FILTER_CTX & ctx_out)
+	{
+		int error = AE_NO;
+		int ret = 0;
+
+		if (_inited) return AE_NO;
+
+		do {
+			_ctx_in_0 = ctx_in0;
+			_ctx_in_1 = ctx_in1;
+			_ctx_out = ctx_out;
+
+			_filter_graph = avfilter_graph_alloc();
+			if (!_filter_graph) {
+				error = AE_FILTER_ALLOC_GRAPH_FAILED;
+				break;
+			}
+
+			const std::string filter_desrc = "[in0][in1]amix=inputs=2:duration=first:dropout_transition=0[out]";
+
+			_ctx_in_0.inout = avfilter_inout_alloc();
+			_ctx_in_1.inout = avfilter_inout_alloc();
+			_ctx_out.inout = avfilter_inout_alloc();
+
+			char pad_args0[512] = { 0 }, pad_args1[512] = { 0 };
+
+			// Each abuffer source is parameterized with its input format.
+			format_pad_arg(pad_args0, 512, _ctx_in_0);
+			format_pad_arg(pad_args1, 512, _ctx_in_1);
+
+			ret = avfilter_graph_create_filter(&_ctx_in_0.ctx, avfilter_get_by_name("abuffer"), "in0", pad_args0, NULL, _filter_graph);
+			if (ret < 0) {
+				error = AE_FILTER_CREATE_FILTER_FAILED;
+				break;
+			}
+
+			ret = avfilter_graph_create_filter(&_ctx_in_1.ctx, avfilter_get_by_name("abuffer"), "in1", pad_args1, NULL, _filter_graph);
+			if (ret < 0) {
+				error = AE_FILTER_CREATE_FILTER_FAILED;
+				break;
+			}
+
+			ret = avfilter_graph_create_filter(&_ctx_out.ctx, avfilter_get_by_name("abuffersink"), "out", NULL, NULL, _filter_graph);
+			if (ret < 0) {
+				error = AE_FILTER_CREATE_FILTER_FAILED;
+				break;
+			}
+
+			// Constrain the sink to the requested output format.
+			av_opt_set_bin(_ctx_out.ctx, "sample_fmts", (uint8_t*)&_ctx_out.sample_fmt, sizeof(_ctx_out.sample_fmt), AV_OPT_SEARCH_CHILDREN);
+			av_opt_set_bin(_ctx_out.ctx, "channel_layouts", (uint8_t*)&_ctx_out.channel_layout, sizeof(_ctx_out.channel_layout), AV_OPT_SEARCH_CHILDREN);
+			av_opt_set_bin(_ctx_out.ctx, "sample_rates", (uint8_t*)&_ctx_out.sample_rate, sizeof(_ctx_out.sample_rate), AV_OPT_SEARCH_CHILDREN);
+
+			// Chain in0 -> in1 so the array below acts as the "outputs" list
+			// expected by avfilter_graph_parse_ptr.
+			_ctx_in_0.inout->name = av_strdup("in0");
+			_ctx_in_0.inout->filter_ctx = _ctx_in_0.ctx;
+			_ctx_in_0.inout->pad_idx = 0;
+			_ctx_in_0.inout->next = _ctx_in_1.inout;
+
+			_ctx_in_1.inout->name = av_strdup("in1");
+			_ctx_in_1.inout->filter_ctx = _ctx_in_1.ctx;
+			_ctx_in_1.inout->pad_idx = 0;
+			_ctx_in_1.inout->next = NULL;
+
+			_ctx_out.inout->name = av_strdup("out");
+			_ctx_out.inout->filter_ctx = _ctx_out.ctx;
+			_ctx_out.inout->pad_idx = 0;
+			_ctx_out.inout->next = NULL;
+
+			AVFilterInOut *inoutputs[2] = { _ctx_in_0.inout,_ctx_in_1.inout };
+
+			ret = avfilter_graph_parse_ptr(_filter_graph, filter_desrc.c_str(), &_ctx_out.inout, inoutputs, NULL);
+			if (ret < 0) {
+				error = AE_FILTER_PARSE_PTR_FAILED;
+				break;
+			}
+
+			ret = avfilter_graph_config(_filter_graph, NULL);
+			if (ret < 0) {
+				error = AE_FILTER_CONFIG_FAILED;
+				break;
+			}
+
+			//al_debug("dump graph:\r\n%s", avfilter_graph_dump(_filter_graph, NULL));
+
+			_inited = true;
+		} while (0);
+
+		if (error != AE_NO) {
+			al_debug("filter init failed:%s %d", err2str(error), ret);
+			cleanup();
+		}
+
+		// NOTE(review): the AVFilterInOut nodes are intentionally not freed
+		// here (see the disabled code below) — confirm ownership is taken by
+		// avfilter_graph_parse_ptr and this is not a leak.
+		//if (_ctx_in_0.inout)
+		//	avfilter_inout_free(&_ctx_in_0.inout);
+
+		//if (_ctx_in_1.inout)
+		//	avfilter_inout_free(&_ctx_in_1.inout);
+
+		//if (_ctx_out.inout)
+		//	avfilter_inout_free(&_ctx_out.inout);
+
+		return error;
+	}
+
+	// Start the drain thread pulling mixed frames from the sink.
+	// AE_NEED_INIT before init(); AE_NO if already running.
+	int filter_amix::start()
+	{
+		if (!_inited)
+			return AE_NEED_INIT;
+
+		if (_running)
+			return AE_NO;
+
+		_running = true;
+		_thread = std::thread(std::bind(&filter_amix::filter_loop, this));
+
+		return 0;
+	}
+
+	// Signal the drain thread to exit and join it; idempotent.
+	int filter_amix::stop()
+	{
+		if (!_inited || !_running)
+			return AE_NO;
+
+		_running = false;
+
+		// Wake the loop so it observes _running == false promptly.
+		_cond_notify = true;
+		_cond_var.notify_all();
+
+		if (_thread.joinable())
+			_thread.join();
+
+		return AE_NO;
+	}
+
+	// Push one audio frame into input pad `index` (0 or 1) and wake the
+	// drain thread. Returns AE_NO on success, an AE_* code otherwise.
+	int filter_amix::add_frame(AVFrame * frame, int index)
+	{
+		std::unique_lock<std::mutex> lock(_mutex);
+
+		int error = AE_NO;
+		int ret = 0;
+
+		do {
+			AVFilterContext *ctx = NULL;
+			switch (index) {
+			case 0:
+				ctx = _ctx_in_0.ctx;
+				break;
+			case 1:
+				ctx = _ctx_in_1.ctx;
+				break;
+			default:
+				ctx = NULL;
+				break;
+			}
+
+			if (!ctx) {
+				error = AE_FILTER_INVALID_CTX_INDEX;
+				break;
+			}
+
+			//print_frame(frame, index);
+			// Bug fix: this assignment previously declared a SECOND `ret`,
+			// shadowing the outer one, so the failure log below always
+			// printed 0 instead of the real ffmpeg error code.
+			ret = av_buffersrc_add_frame_flags(ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
+			if (ret < 0) {
+				error = AE_FILTER_ADD_FRAME_FAILED;
+				break;
+			}
+
+		} while (0);
+
+		if (error != AE_NO) {
+			al_debug("add frame failed:%s ,%d", err2str(error), ret);
+		}
+
+		_cond_notify = true;
+		_cond_var.notify_all();
+
+		return error;
+	}
+
+	// Time base of frames produced by the sink; valid only after init().
+	const AVRational filter_amix::get_time_base()
+	{
+		return av_buffersink_get_time_base(_ctx_out.ctx);
+	}
+
+	// Free the graph (which owns the filter contexts) and reset all state.
+	void filter_amix::cleanup()
+	{
+		if (_filter_graph)
+			avfilter_graph_free(&_filter_graph);
+
+		memset(&_ctx_in_0, 0, sizeof(FILTER_CTX));
+		memset(&_ctx_in_1, 0, sizeof(FILTER_CTX));
+		memset(&_ctx_out, 0, sizeof(FILTER_CTX));
+
+		_inited = false;
+	}
+
+	// Worker thread: wait until add_frame() signals, then drain every mixed
+	// frame from the sink and hand it to the registered callback (index -1).
+	void filter_amix::filter_loop()
+	{
+		AVFrame *frame = av_frame_alloc();
+
+		int ret = 0;
+		while (_running) {
+			std::unique_lock<std::mutex> lock(_mutex);
+			// Timed wait so stop() is noticed even without a final notify.
+			while (!_cond_notify && _running)
+				_cond_var.wait_for(lock,std::chrono::milliseconds(300));
+
+			while (_running && _cond_notify) {
+				ret = av_buffersink_get_frame(_ctx_out.ctx, frame);
+				if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+					// No more mixed data available right now.
+					break;;
+				}
+
+				if (ret < 0) {
+					al_fatal("avfilter get frame error:%d", ret);
+					if (_on_filter_error) _on_filter_error(ret, -1);
+					break;
+				}
+
+				if (_on_filter_data)
+					_on_filter_data(frame, -1);
+
+				av_frame_unref(frame);
+			}
+
+			_cond_notify = false;
+		}
+
+		av_frame_free(&frame);
+	}
+
+}

+ 57 - 0
libs/Recorder/filter_amix.h

@@ -0,0 +1,57 @@
+#ifndef FILTER_AMIX
+#define FILTER_AMIX
+
+#include "filter.h"
+
+
+namespace am {
+	// Mixes two audio streams into one via an ffmpeg "amix" filter graph.
+	// Frames are pushed with add_frame(index) and mixed output is delivered
+	// asynchronously on an internal thread through the registered callback.
+	class filter_amix
+	{
+	public:
+		filter_amix();
+		~filter_amix();
+
+		// Build the graph: two inputs (ctx_in0/ctx_in1) -> one output (ctx_out).
+		int init(const FILTER_CTX &ctx_in0, const FILTER_CTX &ctx_in1, const FILTER_CTX &ctx_out);
+
+		// Register data/error callbacks; call before start().
+		inline void registe_cb(on_filter_data cb_on_filter_data, on_filter_error cb_on_filter_error) {
+			_on_filter_data = cb_on_filter_data;
+			_on_filter_error = cb_on_filter_error;
+		}
+
+		int start();
+
+		int stop();
+
+		// Push a frame into input pad `index` (0 or 1).
+		int add_frame(AVFrame *frame, int index);
+
+		const AVRational get_time_base();
+
+	private:
+		void cleanup();
+		void filter_loop();
+
+
+
+	private:
+		FILTER_CTX _ctx_in_0;
+		FILTER_CTX _ctx_in_1;
+		FILTER_CTX _ctx_out;
+
+		AVFilterGraph *_filter_graph;
+
+		on_filter_data _on_filter_data;
+		on_filter_error _on_filter_error;
+
+		std::atomic_bool _inited;
+		std::atomic_bool _running;
+
+		// Drain thread pulling mixed frames from the sink.
+		std::thread _thread;
+
+		std::mutex _mutex;
+		std::condition_variable _cond_var;
+		bool _cond_notify;   // guarded by _mutex; set when new input arrived
+	};
+
+}
+
+#endif

+ 231 - 0
libs/Recorder/filter_aresample.cpp

@@ -0,0 +1,231 @@
+#include "filter_aresample.h"
+
+#include <chrono>
+#include <sstream>
+
+#include "error_define.h"
+#include "log_helper.h"
+
+namespace am {
+
+	// Register ffmpeg components and zero all state; the graph itself is
+	// built in init().
+	filter_aresample::filter_aresample()
+	{
+		av_register_all();
+		avfilter_register_all();
+
+		memset(&_ctx_in, 0, sizeof(FILTER_CTX));
+		memset(&_ctx_out, 0, sizeof(FILTER_CTX));
+
+		_filter_graph = NULL;
+
+		_inited = false;
+		_running = false;
+
+		_cond_notify = false;
+
+		_index = -1;
+	}
+
+
+	// Stop the worker thread, then free the filter graph.
+	filter_aresample::~filter_aresample()
+	{
+		stop();
+		cleanup();
+	}
+
+	// Build an "aresample=...,aformat=..." graph converting audio from the
+	// ctx_in format to the ctx_out format. `index` tags this instance and is
+	// passed back through the data/error callbacks.
+	// Returns AE_NO on success; on failure the partial graph is freed.
+	int filter_aresample::init(const FILTER_CTX & ctx_in, const FILTER_CTX & ctx_out, int index)
+	{
+		int error = AE_NO;
+		int ret = 0;
+
+		if (_inited) return AE_NO;
+
+		_index = index;
+
+		do {
+			_ctx_in = ctx_in;
+			_ctx_out = ctx_out;
+
+			_filter_graph = avfilter_graph_alloc();
+			if (!_filter_graph) {
+				error = AE_FILTER_ALLOC_GRAPH_FAILED;
+				break;
+			}
+
+			char layout_name[256] = { 0 };
+			av_get_channel_layout_string(layout_name, 256, ctx_out.nb_channel, ctx_out.channel_layout);
+
+
+			// Compose e.g. "aresample=44100,aformat=sample_fmts=fltp:channel_layouts=stereo".
+			std::stringstream filter_desrcss;
+			filter_desrcss << "aresample=";
+			filter_desrcss << ctx_out.sample_rate;
+			filter_desrcss << ",aformat=sample_fmts=";
+			filter_desrcss << av_get_sample_fmt_name(ctx_out.sample_fmt);
+			filter_desrcss << ":channel_layouts=";
+			filter_desrcss << layout_name;
+
+			std::string filter_desrc = filter_desrcss.str();
+
+			_ctx_in.inout = avfilter_inout_alloc();
+			_ctx_out.inout = avfilter_inout_alloc();
+
+			char pad_args[512] = { 0 };
+
+			format_pad_arg(pad_args, 512, _ctx_in);
+
+			ret = avfilter_graph_create_filter(&_ctx_in.ctx, avfilter_get_by_name("abuffer"), "in", pad_args, NULL, _filter_graph);
+			if (ret < 0) {
+				error = AE_FILTER_CREATE_FILTER_FAILED;
+				break;
+			}
+
+			ret = avfilter_graph_create_filter(&_ctx_out.ctx, avfilter_get_by_name("abuffersink"), "out", NULL, NULL, _filter_graph);
+			if (ret < 0) {
+				error = AE_FILTER_CREATE_FILTER_FAILED;
+				break;
+			}
+
+			// Constrain the sink to the requested output format.
+			av_opt_set_bin(_ctx_out.ctx, "sample_fmts", (uint8_t*)&_ctx_out.sample_fmt, sizeof(_ctx_out.sample_fmt), AV_OPT_SEARCH_CHILDREN);
+			av_opt_set_bin(_ctx_out.ctx, "channel_layouts", (uint8_t*)&_ctx_out.channel_layout, sizeof(_ctx_out.channel_layout), AV_OPT_SEARCH_CHILDREN);
+			av_opt_set_bin(_ctx_out.ctx, "sample_rates", (uint8_t*)&_ctx_out.sample_rate, sizeof(_ctx_out.sample_rate), AV_OPT_SEARCH_CHILDREN);
+
+			_ctx_in.inout->name = av_strdup("in");
+			_ctx_in.inout->filter_ctx = _ctx_in.ctx;
+			_ctx_in.inout->pad_idx = 0;
+			_ctx_in.inout->next = NULL;
+
+			_ctx_out.inout->name = av_strdup("out");
+			_ctx_out.inout->filter_ctx = _ctx_out.ctx;
+			_ctx_out.inout->pad_idx = 0;
+			_ctx_out.inout->next = NULL;
+
+			ret = avfilter_graph_parse_ptr(_filter_graph, filter_desrc.c_str(), &_ctx_out.inout, &_ctx_in.inout, NULL);
+			if (ret < 0) {
+				error = AE_FILTER_PARSE_PTR_FAILED;
+				break;
+			}
+
+			ret = avfilter_graph_config(_filter_graph, NULL);
+			if (ret < 0) {
+				error = AE_FILTER_CONFIG_FAILED;
+				break;
+			}
+
+			_inited = true;
+		} while (0);
+
+		if (error != AE_NO) {
+			al_debug("filter init failed:%s %d", err2str(error), ret);
+			cleanup();
+		}
+
+		return error;
+	}
+
+	// Start the drain thread pulling resampled frames from the sink.
+	// AE_NEED_INIT before init(); AE_NO if already running.
+	int filter_aresample::start()
+	{
+		if (!_inited)
+			return AE_NEED_INIT;
+
+		if (_running)
+			return AE_NO;
+
+		_running = true;
+		_thread = std::thread(std::bind(&filter_aresample::filter_loop, this));
+
+		return 0;
+	}
+
+	// Signal the drain thread to exit and join it; idempotent.
+	int filter_aresample::stop()
+	{
+		if (!_inited || !_running)
+			return AE_NO;
+
+		_running = false;
+
+		// Wake the loop so it observes _running == false promptly.
+		_cond_notify = true;
+		_cond_var.notify_all();
+
+		if (_thread.joinable())
+			_thread.join();
+
+		return AE_NO;
+	}
+
+	// Push one audio frame into the resampler input and wake the drain
+	// thread. Returns AE_NO on success, an AE_* code otherwise.
+	int filter_aresample::add_frame(AVFrame * frame)
+	{
+		std::unique_lock<std::mutex> lock(_mutex);
+
+		int error = AE_NO;
+		int ret = 0;
+
+		do {
+			// Bug fix: this assignment previously declared a SECOND `ret`,
+			// shadowing the outer one, so the failure log below always
+			// printed 0 instead of the real ffmpeg error code.
+			ret = av_buffersrc_add_frame_flags(_ctx_in.ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
+			if (ret < 0) {
+				error = AE_FILTER_ADD_FRAME_FAILED;
+				break;
+			}
+
+		} while (0);
+
+		if (error != AE_NO) {
+			al_debug("add frame failed:%s ,%d", err2str(error), ret);
+		}
+
+		_cond_notify = true;
+		_cond_var.notify_all();
+
+		return error;
+	}
+
+	// Time base of frames produced by the sink; valid only after init().
+	const AVRational filter_aresample::get_time_base()
+	{
+		return av_buffersink_get_time_base(_ctx_out.ctx);
+	}
+
+	// Free the graph (which owns the filter contexts) and reset all state.
+	void filter_aresample::cleanup()
+	{
+		if (_filter_graph)
+			avfilter_graph_free(&_filter_graph);
+
+		memset(&_ctx_in, 0, sizeof(FILTER_CTX));
+		memset(&_ctx_out, 0, sizeof(FILTER_CTX));
+
+		_inited = false;
+	}
+
+	// Worker thread: wait until add_frame() signals, then drain every
+	// resampled frame from the sink and hand it to the registered callback
+	// tagged with this instance's _index.
+	void filter_aresample::filter_loop()
+	{
+		AVFrame *frame = av_frame_alloc();
+
+		int ret = 0;
+		while (_running) {
+			std::unique_lock<std::mutex> lock(_mutex);
+			// Timed wait so stop() is noticed even without a final notify.
+			while (!_cond_notify && _running)
+				_cond_var.wait_for(lock, std::chrono::milliseconds(300));
+
+			while (_running && _cond_notify) {
+				ret = av_buffersink_get_frame(_ctx_out.ctx, frame);
+				if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+					// No more resampled data available right now.
+					break;;
+				}
+
+				if (ret < 0) {
+					al_fatal("avfilter get frame error:%d", ret);
+					if (_on_filter_error) _on_filter_error(ret, _index);
+					break;
+				}
+
+				if (_on_filter_data)
+					_on_filter_data(frame, _index);
+
+				av_frame_unref(frame);
+			}
+
+			_cond_notify = false;
+		}
+
+		av_frame_free(&frame);
+	}
+}

+ 59 - 0
libs/Recorder/filter_aresample.h

@@ -0,0 +1,59 @@
#ifndef FILTER_ARESAMPLE
#define FILTER_ARESAMPLE

#include "filter.h"

namespace am {
	
	// Wraps an FFmpeg "aresample" filter graph: frames pushed in through
	// add_frame() are resampled on a dedicated worker thread and delivered to
	// the registered on_filter_data callback.
	class filter_aresample
	{
	public:
		filter_aresample();
		~filter_aresample();

		// Build the graph converting from ctx_in's audio format to ctx_out's.
		// `index` tags every callback so a single consumer can distinguish
		// several filter instances.
		int init(const FILTER_CTX &ctx_in, const FILTER_CTX &ctx_out, int index);

		// Register output/error callbacks; call before start().
		inline void registe_cb(on_filter_data cb_on_filter_data, on_filter_error cb_on_filter_error) {
			_on_filter_data = cb_on_filter_data;
			_on_filter_error = cb_on_filter_error;
		}

		int start();

		int stop();

		// Push one input frame; the caller keeps ownership of `frame`.
		int add_frame(AVFrame *frame);

		// Time base of the resampled output; valid after init().
		const AVRational get_time_base();

	private:
		void cleanup();
		void filter_loop();



	private:
		int _index;

		FILTER_CTX _ctx_in;
		FILTER_CTX _ctx_out;

		AVFilterGraph *_filter_graph;

		on_filter_data _on_filter_data;
		on_filter_error _on_filter_error;

		std::atomic_bool _inited;
		std::atomic_bool _running;

		std::thread _thread;

		// _cond_notify is guarded by _mutex; raised by add_frame()/stop() to
		// wake the worker in filter_loop().
		std::mutex _mutex;
		std::condition_variable _cond_var;
		bool _cond_notify;
	};

}


#endif

+ 206 - 0
libs/Recorder/hardware_acceleration.cpp

@@ -0,0 +1,206 @@
+#include "hardware_acceleration.h"
+
+#include <map>
+#include <algorithm>
+
+#include "headers_ffmpeg.h"
+
+#include "d3d_helper.h"
+#include "log_helper.h"
+#include "utils_string.h"
+#include "error_define.h"
+
+
+namespace am {
+
	// Human-readable encoder names keyed by hardware backend type.
	static const std::map<_HARDWARE_TYPE, const char*> encoder_map = {
		{ HARDWARE_TYPE_NVENC , "Nvidia.NVENC" },
		{ HARDWARE_TYPE_QSV , "Intel.QSV" },
		{ HARDWARE_TYPE_AMF , "AMD.AMF" },
		{ HARDWARE_TYPE_VAAPI , "FFmpeg.Vaapi"}
	};

	// GPU model substrings (matched against the upper-cased adapter
	// description) that are known not to support NVENC even though the
	// NVIDIA driver is present.
	static const std::list<std::string> nvenc_blacklist = {
		"720M", "730M",  "740M",  "745M",  "820M",  "830M",
		"840M", "845M",  "920M",  "930M",  "940M",  "945M",
		"1030", "MX110", "MX130", "MX150", "MX230", "MX250",
		"M520", "M500",  "P500",  "K620M"
	};
	

	// Internal probe helpers, defined below in this translation unit.
	static bool get_encoder_name(HARDWARE_TYPE type, char name[ENCODER_NAME_LEN]);

	static bool is_nvenc_blacklist(std::string desc);

	static bool is_nvenc_canload();

	static bool is_nvenc_support();

	static bool is_qsv_support();

	static bool is_amf_support();

	static bool is_vaapi_support();
+
	// Copy the UTF-8 display name of the given hardware type into `name`.
	// Returns false when the type has no entry in encoder_map.
	bool get_encoder_name(HARDWARE_TYPE type, char name[ENCODER_NAME_LEN]) {

		if (encoder_map.find(type) == encoder_map.end()) return false;

		strcpy_s(name, ENCODER_NAME_LEN, utils_string::ascii_utf8(encoder_map.at(type)).c_str());

		return true;
	}
+
+	bool is_nvenc_blacklist(std::string desc) {
+		for (auto itr = nvenc_blacklist.begin(); itr != nvenc_blacklist.end(); itr++) {
+			if (desc.find((*itr).c_str()) != std::string::npos)
+				return true;
+		}
+
+		return false;
+	}
+
+	bool is_nvenc_canload() {		
+		std::string module_name;
+		if (sizeof(void *) == 8) {
+			module_name = "nvEncodeAPI64.dll";
+		}
+		else {
+			module_name = "nvEncodeAPI.dll";
+		}
+
+		HMODULE hnvenc = GetModuleHandleA(module_name.c_str());
+		if (!hnvenc)
+			hnvenc = LoadLibraryA(module_name.c_str());
+
+
+		bool is_canload = !!hnvenc;
+
+		if (hnvenc) FreeModule(hnvenc);
+
+		return is_canload;
+	}
+
	// NVENC is considered usable when all three hold: the ffmpeg build ships
	// the nvenc H.264 encoder, a non-blacklisted NVIDIA adapter is present,
	// and the NVENC runtime DLL can be loaded.
	bool is_nvenc_support() {
		bool is_support = false;

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 9, 100)
		av_register_all();
#endif
		do {
			// Older ffmpeg builds named the encoder "nvenc_h264".
			if (avcodec_find_encoder_by_name("nvenc_h264") == nullptr &&
				avcodec_find_encoder_by_name("h264_nvenc") == nullptr)
				break;

#if defined(_WIN32)
			int error = AE_NO;
			auto adapters = d3d_helper::get_adapters(&error);
			if (error != AE_NO || adapters.size() == 0)
				break;

			// Scan every DXGI adapter for an NVIDIA card not on the blacklist.
			bool has_device = false;
			for (std::list<IDXGIAdapter *>::iterator itr = adapters.begin(); itr != adapters.end(); itr++) {
				IDXGIOutput *adapter_output = nullptr;
				DXGI_ADAPTER_DESC adapter_desc = { 0 };
				DXGI_OUTPUT_DESC adapter_output_desc = { 0 };

				HRESULT hr = (*itr)->GetDesc(&adapter_desc);
				
				// Upper-case the description so substring matching is
				// case-insensitive.
				std::string strdesc = utils_string::unicode_ascii(adapter_desc.Description);
				std::transform(strdesc.begin(), strdesc.end(), strdesc.begin(), ::toupper);

				if (SUCCEEDED(hr) && (strdesc.find("NVIDIA") != std::string::npos) && !is_nvenc_blacklist(strdesc)) {
					has_device = true;
					break;
				}
			}

			if(!has_device) break;
			
			if (!is_nvenc_canload()) break;
#else
			/*
			if (!os_dlopen("libnvidia-encode.so.1"))
				break;
				*/
#endif

			is_support = true;
		} while (0);

		return is_support;
	}
+
+	bool is_qsv_support() {
+		bool is_support = false;
+
+		return is_support;
+	}
+
+	bool is_amf_support() {
+		bool is_support = false;
+
+		return is_support;
+	}
+
+	bool is_vaapi_support() {
+		bool is_support = false;
+
+		return is_support;
+	}
+
+	std::vector<std::string> hardware_acceleration::get_video_hardware_devices() {
+		std::vector<std::string> devices;
+
+		enum AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;
+
+		while ((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE) {
+			devices.push_back(av_hwdevice_get_type_name(type));
+			al_debug("%s",av_hwdevice_get_type_name(type));
+		}
+
+		
+		AVCodec *nvenc = avcodec_find_encoder_by_name("nvenc_h264");
+		if(nvenc == nullptr)
+			nvenc = avcodec_find_encoder_by_name("h264_nvenc");
+
+		if (nvenc)
+			al_debug("nvenc support");
+
+		AVCodec *vaapi = avcodec_find_encoder_by_name("h264_qsv");
+		if (vaapi)
+			al_debug("qsv support");
+
+		return devices;
+	}
+
+	std::list<HARDWARE_ENCODER> hardware_acceleration::get_supported_video_encoders() {
+		std::list<HARDWARE_ENCODER> encoders;
+
+		HARDWARE_ENCODER encoder;
+
+		encoder.type = HARDWARE_TYPE_NVENC;
+		if (is_nvenc_support() && get_encoder_name(encoder.type, encoder.name)) {
+			encoders.push_back(encoder);
+		}
+
+		encoder.type = HARDWARE_TYPE_QSV;
+		if (is_qsv_support() && get_encoder_name(encoder.type, encoder.name)) {
+			encoders.push_back(encoder);
+		}
+
+		encoder.type = HARDWARE_TYPE_AMF;
+		if (is_amf_support() && get_encoder_name(encoder.type, encoder.name)) {
+			encoders.push_back(encoder);
+		}
+
+		encoder.type = HARDWARE_TYPE_VAAPI;
+		if (is_vaapi_support() && get_encoder_name(encoder.type, encoder.name)) {
+			encoders.push_back(encoder);
+		}
+
+
+		return encoders;
+	}
+
+}

+ 37 - 0
libs/Recorder/hardware_acceleration.h

@@ -0,0 +1,37 @@
#ifndef HARDWARE_ACCELERATION
#define HARDWARE_ACCELERATION

#include <string>
#include <vector>
#include <list>

#define ENCODER_NAME_LEN 100

namespace am {
	// Known hardware video-encoder backends.
	typedef enum _HARDWARE_TYPE {
		HARDWARE_TYPE_UNKNOWN,
		HARDWARE_TYPE_NVENC,
		HARDWARE_TYPE_QSV,
		HARDWARE_TYPE_AMF,
		HARDWARE_TYPE_VAAPI
	}HARDWARE_TYPE;

	// A detected encoder: its backend type and UTF-8 display name.
	typedef struct _HARDWARE_ENCODER {
		HARDWARE_TYPE type;
		char name[ENCODER_NAME_LEN];
	}HARDWARE_ENCODER;

	// Static-only helper for probing hardware encoding capabilities;
	// instantiation is intentionally disabled.
	class hardware_acceleration
	{
	private:
		hardware_acceleration(){}
		~hardware_acceleration(){}

	public:
		static std::vector<std::string> get_video_hardware_devices();
		static std::list<HARDWARE_ENCODER> get_supported_video_encoders();
	};

}

#endif

+ 22 - 0
libs/Recorder/headers_ffmpeg.h

@@ -0,0 +1,22 @@
+#pragma once
+extern "C" {
+#include <libavformat\avformat.h>
+#include <libavcodec\avcodec.h>
+#include <libavdevice\avdevice.h>
+#include <libswscale\swscale.h>
+#include <libswresample\swresample.h>
+#include <libavutil\avassert.h>
+#include <libavutil\channel_layout.h>
+#include <libavutil\opt.h>
+#include <libavutil\mathematics.h>
+#include <libavutil\timestamp.h>
+#include <libavutil\error.h>
+#include <libavcodec\adts_parser.h>
+#include <libavutil\time.h>
+#include <libavfilter\avfilter.h>
+#include <libavfilter\buffersink.h>
+#include <libavfilter\buffersrc.h>
+#include <libavutil\imgutils.h>
+#include <libavutil\samplefmt.h>
+#include <libavutil\log.h>
+}

+ 4 - 0
libs/Recorder/headers_mmdevice.cpp

@@ -0,0 +1,4 @@
+#include <initguid.h>//must define in a cpp file only once
+
+#include "headers_mmdevice.h"
+

+ 31 - 0
libs/Recorder/headers_mmdevice.h

@@ -0,0 +1,31 @@
+#pragma once
+
+#ifdef _WIN32
+
+#include <windows.h>
+
+#include <mmdeviceapi.h>
+#include <propkeydef.h>//must include before functiondiscoverykeys_devpkey
+#include <functiondiscoverykeys_devpkey.h>
+
+#include <wrl/client.h>
+#include <devicetopology.h>
+
+#include <propsys.h>
+#include <AudioClient.h>
+#include <AudioPolicy.h>
+
+class com_initialize {
+public:
+	com_initialize() {
+		CoInitializeEx(NULL, COINIT_MULTITHREADED);
+	}
+	~com_initialize() {
+		CoUninitialize();
+	}
+};
+
+#define DEFAULT_AUDIO_INOUTPUT_NAME "Default"
+#define DEFAULT_AUDIO_INOUTPUT_ID "Default"
+
+#endif // _WIN32

+ 68 - 0
libs/Recorder/log_helper.cpp

@@ -0,0 +1,68 @@
+#include "log_helper.h"
+#include <stdio.h>
+#include <stdarg.h>
+#include <share.h>
+
+#include <mutex>
+
+#define AMLOCK(A) std::lock_guard<std::mutex> lock(A)
+
+#define LOG_ROLL_SIZE (1024 * 1024)
+
+AMLog* AMLog::_log = NULL;
+std::mutex _lock;
+
// Take ownership of an already-opened FILE and publish this object as the
// process-wide singleton (see AMLog::get()).
AMLog::AMLog(FILE* handle)
	: _handle(handle)
{
	_log = this;
}
+
+AMLog::~AMLog()
+{
+	AMLOCK(_lock);
+	if (_log && _handle) {
+		fclose(_handle);
+		_log = NULL;
+	}
+}
+
+AMLog* AMLog::get(const char* path)
+{
+	if (_log || !path) {
+		return _log;
+	}
+	DWORD size = 0;
+	HANDLE file = CreateFile(path, GENERIC_READ, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
+		NULL);
+	if (file != INVALID_HANDLE_VALUE) {
+		size = GetFileSize(file, NULL);
+		CloseHandle(file);
+	}
+	if (size != INVALID_FILE_SIZE && size > LOG_ROLL_SIZE) {
+		if (DeleteFileA(path) == FALSE) {
+			TCHAR roll_path[MAX_PATH];
+			sprintf_s(roll_path, MAX_PATH, "%s.1", path);
+			if (!MoveFileEx(path, roll_path, MOVEFILE_REPLACE_EXISTING)) {
+				return NULL;
+			}
+		}
+	}
+	FILE* handle = _fsopen(path, "a+", _SH_DENYNO);
+	if (!handle) {
+		return NULL;
+	}
+	_log = new AMLog(handle);
+	return _log;
+}
+
+void AMLog::printf(const char* format, ...)
+{
+	AMLOCK(_lock);
+	va_list args;
+
+	va_start(args, format);
+	vfprintf(_handle, format, args);
+	va_end(args);
+	fflush(_handle);
+}

+ 63 - 0
libs/Recorder/log_helper.h

@@ -0,0 +1,63 @@
+#ifndef AM_LOG
+#define AM_LOG
+
+#include <stdio.h>
+#include <time.h>
+#include <sys\timeb.h>
+
+#include <windows.h>
+
// Minimal rolling file logger (singleton). get(path) opens/rolls the file on
// the first call; later calls return the existing instance.
class AMLog {
public:
	~AMLog();
	static AMLog* get(const char* path = NULL);
	void printf(const char* format, ...);

private:
	AMLog(FILE* handle);

private:
	static AMLog* _log;
	FILE* _handle;
};


// Severity levels; values index into AM_LOG_STR.
enum AM_LOG_TYPE {
	AL_TYPE_DEBUG = 0,
	AL_TYPE_INFO,
	AL_TYPE_WARN,
	AL_TYPE_ERROR,
	AL_TYPE_FATAL,
};

// NOTE(review): header-level static — every translation unit gets its own copy.
static const char *AM_LOG_STR[] = { "DEBUG", "INFO", "WARN", "ERROR", "FATAL" };

// Format "date-ms [LEVEL] [func(line)] message" to stdout.
#define al_printf(type,format,datetime,ms,...)                                 \
         printf("%s-%.3d [%s] [%s(%d)] " format "\n",  datetime,ms,type, __FUNCTION__,__LINE__, ## __VA_ARGS__)

// Same layout as al_printf; expanded below as "am_log->PRINT_LINE(...)" so
// the textual "printf" becomes a call to AMLog::printf on the file logger.
#define PRINT_LINE(type, format, datetime, ms, ...)                     \
    printf("%s-%.3d [%s] [%s(%d)] " format "\n",  datetime,ms,type, __FUNCTION__,__LINE__, ## __VA_ARGS__)

// Stamp the current local time, then route the message to the file logger
// when one was created via AMLog::get(path), otherwise to stdout.
#define al_log(type,format,...) do{                                            \
	struct _timeb now;                                                           \
	struct tm today;                                                             \
	char datetime_str[20];                                                       \
	_ftime_s(&now);                                                              \
	localtime_s(&today, &now.time);                                              \
	strftime(datetime_str, 20, "%Y-%m-%d %H:%M:%S", &today);                     \
	AMLog *am_log = AMLog::get();                                                \
	if(am_log){                                                                     \
		am_log->PRINT_LINE(AM_LOG_STR[type], format, datetime_str, now.millitm, ## __VA_ARGS__);  \
	} else {                                                                      \
		al_printf(AM_LOG_STR[type], format, datetime_str, now.millitm, ## __VA_ARGS__);  \
	}                                                                             \
}while (0)   


// Per-severity convenience wrappers.
#define al_debug(format, ...) al_log(AL_TYPE_DEBUG, format, ## __VA_ARGS__)
#define al_info(format, ...) al_log(AL_TYPE_INFO, format, ## __VA_ARGS__)
#define al_warn(format, ...) al_log(AL_TYPE_WARN, format, ## __VA_ARGS__)
#define al_error(format, ...) al_log(AL_TYPE_ERROR, format, ## __VA_ARGS__)
#define al_fatal(format, ...) al_log(AL_TYPE_FATAL, format, ## __VA_ARGS__)
+#endif

+ 498 - 0
libs/Recorder/main.cpp

@@ -0,0 +1,498 @@
+#include "device_audios.h"
+
+#include "record_audio_factory.h"
+#include "record_desktop_factory.h"
+#include "headers_mmdevice.h"
+
+#include "encoder_aac.h"
+#include "resample_pcm.h"
+#include "filter_aresample.h"
+
+#include "muxer_define.h"
+#include "muxer_ffmpeg.h"
+
+#include "utils_string.h"
+#include "system_version.h"
+#include "error_define.h"
+#include "log_helper.h"
+#include "hardware_acceleration.h"
+
+#include "remuxer_ffmpeg.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdint.h>
+
#define USE_WASAPI 1

// Capture/encode parameters used by the test driver below.
#define V_FRAME_RATE 30
#define V_BIT_RATE 1280*1000
#define V_WIDTH GetSystemMetrics(SM_CXSCREEN)
// BUG FIX: removed the stray trailing ';' — a semicolon baked into the macro
// breaks any use of V_HEIGHT inside an expression (function argument,
// arithmetic, initializer list, ...).
#define V_HEIGHT GetSystemMetrics(SM_CYSCREEN)
#define V_QUALITY 100

#define A_SAMPLE_CHANNEL 2
#define A_SAMPLE_RATE 44100
#define A_BIT_RATE 128000
+
+
//for test muxer
static am::record_audio *_recorder_speaker = nullptr;     // loopback/render capture
static am::record_audio *_recorder_microphone = nullptr;  // microphone capture
static am::record_desktop *_recorder_desktop = nullptr;   // screen capture
static am::muxer_file *_muxer;
static am::record_audio *audios[2];                       // [0]=mic, [1]=speaker (see start_muxer)

//for test audio record
static am::record_audio *_recorder_audio = nullptr;
static am::encoder_aac *_encoder_aac = nullptr;
static am::resample_pcm *_resample_pcm = nullptr;
static am::filter_aresample *_filter_aresample = nullptr;

// PCM staging buffers: raw capture bytes accumulate in _sample_buffer until a
// full encoder frame is cached, resampled output lands in _resample_buffer.
static int _sample_in = 0;        // bytes currently cached
static int _sample_size = 0;      // bytes per encoder frame (input format)
static int _resample_size = 0;    // bytes per encoder frame (output format)
static uint8_t *_sample_buffer = nullptr;
static uint8_t *_resample_buffer = nullptr;
+
// Build and start the full pipeline: desktop capture + speaker loopback +
// microphone, all muxed into ..\..\save.mp4. Returns AE_NO on success.
int start_muxer() {
	std::string input_id, input_name, out_id, out_name;

	am::device_audios::get_default_input_device(input_id, input_name);

	al_info("use default input aduio device:%s", input_name.c_str());

	am::device_audios::get_default_ouput_device(out_id, out_name);

	al_info("use default output aduio device:%s", out_name.c_str());

	//first audio resrouce must be speaker,otherwise the audio pts may be not correct,may need to change the filter amix descriptions with duration & sync option
#if !USE_WASAPI

	record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_DSHOW, &_recorder_speaker);
	_recorder_speaker->init(
		am::utils_string::ascii_utf8("audio=virtual-audio-capturer"),
		am::utils_string::ascii_utf8("audio=virtual-audio-capturer"), 
		false
	);

	record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_DSHOW, &_recorder_microphone);
	_recorder_microphone->init(am::utils_string::ascii_utf8("audio=") + input_name, input_id, true);
#else
	record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_WAS, &_recorder_speaker);
	_recorder_speaker->init(out_name, out_id, false);

	record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_WAS, &_recorder_microphone);
	_recorder_microphone->init(input_name, input_id, true);
#endif // !USE_WASAPI


	// Full-screen capture via Windows desktop duplication.
	record_desktop_new(RECORD_DESKTOP_TYPES::DT_DESKTOP_WIN_DUPLICATION, &_recorder_desktop);

	RECORD_DESKTOP_RECT rect;
	rect.left = 0;
	rect.top = 0;
	rect.right = V_WIDTH;
	rect.bottom = V_HEIGHT;

	_recorder_desktop->init(rect, V_FRAME_RATE);

	audios[0] = _recorder_microphone;
	audios[1] = _recorder_speaker;

	_muxer = new am::muxer_ffmpeg();


	// Video: x264 at screen size, downscaled to 1080p; audio: FLTP/44.1k AAC.
	am::MUX_SETTING setting;
	setting.v_frame_rate = V_FRAME_RATE;
	setting.v_bit_rate = V_BIT_RATE;
	setting.v_width = V_WIDTH;
	setting.v_height = V_HEIGHT;
	setting.v_qb = V_QUALITY;
	setting.v_encoder_id = am::EID_VIDEO_X264;
	setting.v_out_width = 1920;
	setting.v_out_height = 1080;

	setting.a_nb_channel = A_SAMPLE_CHANNEL;
	setting.a_sample_fmt = AV_SAMPLE_FMT_FLTP;
	setting.a_sample_rate = A_SAMPLE_RATE;
	setting.a_bit_rate = A_BIT_RATE;

	int error = _muxer->init(am::utils_string::ascii_utf8("..\\..\\save.mp4").c_str(), _recorder_desktop, audios, 2, setting);
	if (error != AE_NO) {
		return error;
	}

	_muxer->start();

	return error;
}
+
+void stop_muxer()
+{
+	_muxer->stop();
+
+	delete _recorder_desktop;
+
+	if(_recorder_speaker)
+		delete _recorder_speaker;
+
+	if (_recorder_microphone)
+		delete _recorder_microphone;
+
+	delete _muxer;
+}
+
// Interactive smoke test: start desktop + audio muxing, wait for a keypress,
// then stop.
void test_recorder()
{
	//av_log_set_level(AV_LOG_DEBUG);

	start_muxer();

	getchar();

	//stop have bug that sometime will stuck
	stop_muxer();

	al_info("record stoped...");
}
+
+void show_devices()
+{
+	std::list<am::DEVICE_AUDIOS> devices;
+
+	am::device_audios::get_input_devices(devices);
+
+	for each (auto device in devices)
+	{
+		al_info("audio input name:%s id:%s", device.name.c_str(), device.id.c_str());
+	}
+
+	am::device_audios::get_output_devices(devices);
+
+	for each (auto device in devices)
+	{
+		al_info("audio output name:%s id:%s", device.name.c_str(), device.id.c_str());
+	}
+}
+
// AAC encoder output callback: currently only logs the packet size.
void on_aac_data(AVPacket *packet) {
	al_debug("on aac data :%d", packet->size);
}
+
// AAC encoder error callback; errors are deliberately ignored in this test.
void on_aac_error(int) {

}
+
// Audio capture error callback (error code, source index); ignored in test.
void on_pcm_error(int, int) {

}
+
// Alternative PCM path (unused by default, see on_pcm_data): caches captured
// bytes in _sample_buffer until one full encoder frame (_sample_size bytes)
// is available, resamples it via swresample and feeds the AAC encoder.
// NOTE(review): only handles packed (non-planar) input — see the comment in
// the loop; planar frames would need per-plane copies.
void on_pcm_data1(AVFrame *frame, int index) {

	int copied_len = 0;
	int sample_len = av_samples_get_buffer_size(NULL, frame->channels, frame->nb_samples, (AVSampleFormat)frame->format, 1);
	int remain_len = sample_len;

	int is_planner = av_sample_fmt_is_planar((AVSampleFormat)frame->format);

	while (remain_len > 0) {//should add is_planner codes
							//cache pcm
		copied_len = min(_sample_size - _sample_in, remain_len);
		if (copied_len) {
			memcpy(_sample_buffer + _sample_in, frame->data[0] + sample_len - remain_len, copied_len);
			_sample_in += copied_len;
			remain_len = remain_len - copied_len;
		}

		//got enough pcm to encoder,resample and mix
		if (_sample_in == _sample_size) {
			int ret = _resample_pcm->convert(_sample_buffer, _sample_size, _resample_buffer,_resample_size);
			if (ret > 0) {
				_encoder_aac->put(_resample_buffer, _resample_size, frame);
			}
			else {
				al_debug("resample audio %d failed,%d", index, ret);
			}

			_sample_in = 0;
		}
	}
}
+
// Audio capture callback: forward raw PCM frames into the aresample filter;
// the caller keeps ownership of `frame` (filter adds with KEEP_REF).
void on_pcm_data(AVFrame *frame, int index) {
	_filter_aresample->add_frame(frame);
}
+
+void on_aresample_data(AVFrame * frame,int index) {
+	int copied_len = 0;
+	int sample_len = av_samples_get_buffer_size(frame->linesize, frame->channels, frame->nb_samples, (AVSampleFormat)frame->format, 1);
+	sample_len = av_samples_get_buffer_size(NULL, frame->channels, frame->nb_samples, (AVSampleFormat)frame->format, 1);
+
+	int remain_len = sample_len;
+
+	//for data is planar,should copy data[0] data[1] to correct buff pos
+	if (av_sample_fmt_is_planar((AVSampleFormat)frame->format) == 0) {
+		while (remain_len > 0) {
+			//cache pcm
+			copied_len = min(_sample_size - _sample_in, remain_len);
+			if (copied_len) {
+				memcpy(_resample_buffer + _sample_in, frame->data[0] + sample_len - remain_len, copied_len);
+				_sample_in += copied_len;
+				remain_len = remain_len - copied_len;
+			}
+
+			//got enough pcm to encoder,resample and mix
+			if (_sample_in == _sample_size) {
+				_encoder_aac->put(_resample_buffer, _sample_size, frame);
+
+				_sample_in = 0;
+			}
+		}
+	}
+	else {//resample size is channels*frame->linesize[0],for 2 channels
+		while (remain_len > 0) {
+			copied_len = min(_sample_size - _sample_in, remain_len);
+			if (copied_len) {
+				memcpy(_resample_buffer + _sample_in / 2, frame->data[0] + (sample_len - remain_len) / 2, copied_len / 2);
+				memcpy(_resample_buffer + _sample_size / 2 + _sample_in / 2, frame->data[1] + (sample_len - remain_len) / 2, copied_len / 2);
+				_sample_in += copied_len;
+				remain_len = remain_len - copied_len;
+			}
+
+			if (_sample_in == _sample_size) {
+				_encoder_aac->put(_resample_buffer, _sample_size, frame);
+
+				_sample_in = 0;
+			}
+		}
+	}
+}
+
// Resample filter error callback; errors are deliberately ignored in test.
void on_aresample_error(int error,int index) {

}
+
// Interactive AAC capture test: record from the default input (or output,
// when the #if is flipped), resample capture-format PCM to FLTP/44.1k via
// filter_aresample, encode to AAC, and tear everything down on keypress.
void save_aac() {
	std::string input_id, input_name, out_id, out_name;

	am::device_audios::get_default_input_device(input_id, input_name);

	am::device_audios::get_default_ouput_device(out_id, out_name);

#if 0
	al_info("use default \r\noutput aduio device name:%s \r\noutput audio device id:%s ",
		out_name.c_str(), out_id.c_str());

	record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_WAS, &_recorder_audio);
	_recorder_audio->init(out_name, out_id, false);
#else
	al_info("use default \r\ninput aduio device name:%s \r\ninput audio device id:%s ",
		input_name.c_str(), input_id.c_str());

	record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_WAS, &_recorder_audio);
	_recorder_audio->init(input_name, input_id, true);
#endif

	_recorder_audio->registe_cb(on_pcm_data, on_pcm_error, 0);

	_encoder_aac = new am::encoder_aac();
	_encoder_aac->init(A_SAMPLE_CHANNEL, A_SAMPLE_RATE, AV_SAMPLE_FMT_FLTP, A_BIT_RATE);

	// swresample settings: src = capture format, dst = encoder format.
	am::SAMPLE_SETTING src, dst = { 0 };
	src = {
		_encoder_aac->get_nb_samples(),
		av_get_default_channel_layout(_recorder_audio->get_channel_num()),
		_recorder_audio->get_channel_num(),
		_recorder_audio->get_fmt(),
		_recorder_audio->get_sample_rate()
	};
	dst = {
		_encoder_aac->get_nb_samples(),
		av_get_default_channel_layout(A_SAMPLE_CHANNEL),
		A_SAMPLE_CHANNEL,
		AV_SAMPLE_FMT_FLTP,
		A_SAMPLE_RATE
	};

	_resample_pcm = new am::resample_pcm();
	_resample_pcm->init(&src, &dst, &_resample_size);
	_resample_buffer = new uint8_t[_resample_size];

	// One encoder frame's worth of bytes in the capture format.
	_sample_size = av_samples_get_buffer_size(NULL, A_SAMPLE_CHANNEL, _encoder_aac->get_nb_samples(), _recorder_audio->get_fmt(), 1);
	_sample_buffer = new uint8_t[_sample_size];

	// Filter graph: capture format/time base -> FLTP 44.1k stereo.
	_filter_aresample = new am::filter_aresample();
	_filter_aresample->init({
		NULL,NULL,
		_recorder_audio->get_time_base(),
		_recorder_audio->get_sample_rate(),
		_recorder_audio->get_fmt(),
		_recorder_audio->get_channel_num(),
		av_get_default_channel_layout(_recorder_audio->get_channel_num())
	}, {
		NULL,NULL,
		{ 1,AV_TIME_BASE },
		A_SAMPLE_RATE,
		AV_SAMPLE_FMT_FLTP,
		A_SAMPLE_CHANNEL,
		av_get_default_channel_layout(A_SAMPLE_CHANNEL)
	},0);
	_filter_aresample->registe_cb(on_aresample_data, on_aresample_error);


	_filter_aresample->start();
	_encoder_aac->start();
	_recorder_audio->start();

	getchar();

	_recorder_audio->stop();
	_filter_aresample->stop();

	delete _recorder_audio;
	delete _encoder_aac;
	delete _resample_pcm;
	delete _filter_aresample;

	delete[] _sample_buffer;
	delete[] _resample_buffer;
}
+
// Interactive test: capture the default render endpoint (speaker loopback)
// until a keypress; microphone path is left commented out.
void test_audio() 
{
	std::string input_id, input_name, out_id, out_name;

	am::device_audios::get_default_input_device(input_id, input_name);

	al_info("use default \r\ninput aduio device name:%s \r\ninput audio device id:%s ", 
		input_name.c_str(), input_id.c_str());

	am::device_audios::get_default_ouput_device(out_id, out_name);

	al_info("use default \r\noutput aduio device name:%s \r\noutput audio device id:%s ", 
		out_name.c_str(), out_id.c_str());

	record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_WAS, &_recorder_speaker);
	_recorder_speaker->init(am::utils_string::ascii_utf8("Default"), am::utils_string::ascii_utf8("Default"), false);


	//record_audio_new(RECORD_AUDIO_TYPES::AT_AUDIO_WAS, &_recorder_microphone);
	//_recorder_microphone->init(input_name,input_id, true);

	_recorder_speaker->start();

	//_recorder_microphone->start();

	getchar();

	_recorder_speaker->stop();
	//_recorder_microphone->stop();
}
+
+
// Remux progress callback: logs the source path, units done and total.
void on_remux_progress(const char *src, int progress, int total)
{
	al_debug("on remux progress:%s %d %d", src, progress, total);
}
+
// Remux state-change callback: logs the source path, new state and error code.
void on_remux_state(const char *src, int state, int error) {
	al_debug("on remux state:%s %d %d", src, state, error);
}
+
// Remux test: container conversion save.mkv -> save.mp4 without re-encoding.
// With TEST_MULTI_THREAD defined, stress-tests 20 concurrent remux jobs.
void test_remux() {
#if TEST_MULTI_THREAD
	for (int i = 0; i < 20; i++) {
		am::REMUXER_PARAM param = { 0 };
		sprintf_s(param.src, 260, "%d", i);
		am::remuxer_ffmpeg::instance()->create_remux(param);
	}
#else
	am::REMUXER_PARAM param = { 0 };

	sprintf_s(param.src, 260, "%s", am::utils_string::ascii_utf8("..\\..\\save.mkv").c_str());
	sprintf_s(param.dst, 260, "%s", am::utils_string::ascii_utf8("..\\..\\save.mp4").c_str());

	param.cb_progress = on_remux_progress;

	param.cb_state = on_remux_state;
	
	am::remuxer_ffmpeg::instance()->create_remux(param);
#endif
}
+
+void test_scale() {
+	static const double scaled_vals[] = { 1.0,         1.25, (1.0 / 0.75), 1.5,
+		(1.0 / 0.6), 1.75, 2.0,          2.25,
+		2.5,         2.75, 3.0,          0.0 };
+
+	auto get_valid_out_resolution = [](int src_width, int src_height, int * out_width, int * out_height)
+	{
+		int scale_cx = src_width;
+		int scale_cy = src_height;
+
+		int i = 0;
+
+		while (((scale_cx * scale_cy) > (1920 * 1080)) && scaled_vals[i] > 0.0) {
+			double scale = scaled_vals[i++];
+			scale_cx = uint32_t(double(src_width) / scale);
+			scale_cy = uint32_t(double(src_height) / scale);
+		}
+
+		if (scale_cx % 2 != 0) {
+			scale_cx += 1;
+		}
+
+		if (scale_cy % 2 != 0) {
+			scale_cy += 1;
+		}
+
+
+		*out_width = scale_cx;
+		*out_height = scale_cy;
+
+		al_info("get valid output resolution from %dx%d to %dx%d,with scale:%lf", src_width, src_height, scale_cx, scale_cy, scaled_vals[i]);
+	};
+
+	int src_width=2736, src_height=1824;
+	int dst_width, dst_height;
+
+	get_valid_out_resolution(src_width, src_height, &dst_width, &dst_height);
+}
+
// Test driver entry point: logs the Windows version, then runs whichever test
// scenario is uncommented (currently the full recorder pipeline).
int main(int argc, char **argv)
{
	al_info("record start...");

	am::winversion_info ver = { 0 };

	am::system_version::get_win(&ver);

	bool is_win8_or_above = am::system_version::is_win8_or_above();

	bool is_ia32 = am::system_version::is_32();

	al_info("win version: %d.%d.%d.%d", ver.major, ver.minor, ver.build, ver.revis);
	al_info("is win8 or above: %s", is_win8_or_above ? "true" : "false");

	//auto hw_encoders = am::hardware_acceleration::get_supported_video_encoders();

	//show_devices();

	//test_audio();

	test_recorder();

	//test_remux();

	//save_aac();

	//test_scale();


	al_info("press any key to exit...");
	system("pause");

	return 0;
}

+ 28 - 0
libs/Recorder/mul_db.h

@@ -0,0 +1,28 @@
+#ifndef MUL_DB
+#define MUL_DB
+
+#include <math.h>
+
+#ifdef _MSC_VER
+#include <float.h>
+
+#pragma warning(push)
+#pragma warning(disable : 4056)
+#pragma warning(disable : 4756)
+#endif
+
// Convert a linear gain multiplier to decibels; a gain of 0 maps to -inf dB.
static inline float mul_to_db(const float mul)
{
	if (mul == 0.0f)
		return -INFINITY;

	return 20.0f * log10f(mul);
}
+
// Convert decibels back to a linear gain multiplier; any non-finite dB value
// (including -inf, the mul_to_db image of 0) maps to a gain of 0.
static inline float db_to_mul(const float db)
{
	if (!isfinite((double)db))
		return 0.0f;

	return powf(10.0f, db / 20.0f);
}
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#endif // !MUL_DB

+ 63 - 0
libs/Recorder/muxer_define.h

@@ -0,0 +1,63 @@
#ifndef MUXER_DEFINE
#define MUXER_DEFINE

#include "encoder_video_define.h"

namespace am {
	// Fixed-size PCM staging buffer; sample_in tracks how many bytes of
	// `buff` are currently filled.
	typedef struct {
		uint8_t *buff;
		int size;
		int sample_in;
	}AUDIO_SAMPLE;

	// Forward declarations to keep this header light.
	class encoder_video;
	class record_desktop;
	class sws_helper;

	class encoder_aac;
	class filter_amix;
	class filter_aresample;
	class record_audio;

	// Output settings shared by the video and audio legs of the muxer.
	typedef struct MUX_SETTING_T {
		int v_frame_rate;
		int v_bit_rate;
		int v_width;
		int v_height;
		int v_out_width;
		int v_out_height;
		int v_qb;
		ENCODER_VIDEO_ID v_encoder_id;

		int a_nb_channel;
		int a_sample_rate;
		AVSampleFormat a_sample_fmt;
		int a_bit_rate;
	}MUX_STREAM;
	typedef struct MUX_STREAM_T {
		//common
		AVStream *st;               // av stream
		AVBitStreamFilterContext *filter; //pps|sps adt

		uint64_t pre_pts;

		MUX_SETTING setting;        // output setting

		//video
		encoder_video *v_enc;         // video encoder
		record_desktop *v_src;      // video source
		sws_helper *v_sws;          // video sws

		//audio
		encoder_aac *a_enc;                     // audio encoder
		filter_amix *a_filter_amix;             // audio mixer
		filter_aresample **a_filter_aresample;  // audio resamplers
		int a_nb;							    // audio source num
		record_audio **a_src;				    // audio sources
		AUDIO_SAMPLE **a_samples;			    // audio sample data
		AUDIO_SAMPLE **a_resamples;			    // audio resampled data
	}MUX_STREAM;
}

#endif

+ 907 - 0
libs/Recorder/muxer_ffmpeg.cpp

@@ -0,0 +1,907 @@
+#include "muxer_ffmpeg.h"
+#include "muxer_define.h"
+
+#include "record_desktop.h"
+#include "encoder_video.h"
+#include "encoder_video_factory.h"
+#include "sws_helper.h"
+
+#include "record_audio.h"
+#include "encoder_aac.h"
+#include "filter_amix.h"
+#include "filter_aresample.h"
+
+#include "ring_buffer.h"
+
+#include "log_helper.h"
+#include "error_define.h"
+
+
+namespace am {
+
+	muxer_ffmpeg::muxer_ffmpeg()
+	{
+		av_register_all();
+
+		_v_stream = NULL;
+		_a_stream = NULL;
+
+		_fmt = NULL;
+		_fmt_ctx = NULL;
+
+		_base_time = -1;
+	}
+
	muxer_ffmpeg::~muxer_ffmpeg()
	{
		// Stop sources/encoders and write the trailer first, then free everything.
		stop();
		cleanup();
	}
+
	// Build the whole recording pipeline: allocate the output format context,
	// create the video leg (desktop capture -> sws -> encoder) and the audio
	// leg (captures -> resample/amix -> aac), then open the output file and
	// write the container header.
	// Returns AE_NO on success; on any failure everything allocated so far is
	// released via cleanup() and the error code is returned.
	int muxer_ffmpeg::init(
		const char * output_file,
		record_desktop * source_desktop,
		record_audio ** source_audios,
		const int source_audios_nb,
		const MUX_SETTING_T & setting
	)
	{
		int error = AE_NO;
		int ret = 0;

		do {
			al_info("start to initialize muxer ,output:%s ", output_file);

			error = alloc_oc(output_file, setting);
			if (error != AE_NO)
				break;

			if (_fmt->video_codec != AV_CODEC_ID_NONE) {
				error = add_video_stream(setting, source_desktop);
				if (error != AE_NO)
					break;
			}

			// Audio is optional: only added when the caller supplied sources.
			if (_fmt->audio_codec != AV_CODEC_ID_NONE && source_audios_nb) {
				error = add_audio_stream(setting, source_audios, source_audios_nb);
				if (error != AE_NO)
					break;
			}

			error = open_output(output_file, setting);
			if (error != AE_NO)
				break;

			av_dump_format(_fmt_ctx, 0, NULL, 1);

			_inited = true;

		} while (0);

		if (error != AE_NO) {
			cleanup();
			// NOTE(review): 'ret' is never assigned above, so this always logs 0.
			al_debug("muxer ffmpeg initialize failed:%s %d", err2str(error), ret);
		}

		return error;
	}
+
	// Start the pipeline consumers-first (encoders, then filters, then capture
	// sources) so no producer delivers data before its consumer is ready.
	// _base_time records the shared clock origin used to align audio and video
	// timestamps in write_video()/write_audio().
	int muxer_ffmpeg::start()
	{
		std::lock_guard<std::mutex> lock(_mutex);

		int error = AE_NO;

		if (_running == true) {
			return AE_NO;
		}

		if (_inited == false) {
			return AE_NEED_INIT;
		}

		_base_time = av_gettime_relative();

		if (_v_stream && _v_stream->v_enc)
			_v_stream->v_enc->start();

		if (_a_stream && _a_stream->a_enc)
			_a_stream->a_enc->start();

		// Two or more audio sources go through the amix filter ...
		if (_a_stream && _a_stream->a_nb >= 2 && _a_stream->a_filter_amix)
			_a_stream->a_filter_amix->start();

		// ... a single source feeds the encoder through its resampler directly.
		if (_a_stream && _a_stream->a_nb < 2 && _a_stream->a_filter_aresample) {
			for (int i = 0; i < _a_stream->a_nb; i++) {
				_a_stream->a_filter_aresample[i]->start();
			}
		}

		if (_a_stream && _a_stream->a_src) {
			for (int i = 0; i < _a_stream->a_nb; i++) {
				if(_a_stream->a_src[i])
					_a_stream->a_src[i]->start();
			}
		}

		if (_v_stream && _v_stream->v_src)
			_v_stream->v_src->start();

		_running = true;

		return error;
	}
+
	// Stop the pipeline producers-first (capture sources, then filters, then
	// encoders) so queued data drains forward, and finally write the container
	// trailer. Safe to call when not running.
	int muxer_ffmpeg::stop()
	{
		std::lock_guard<std::mutex> lock(_mutex);

		if (_running == false)
			return AE_NO;

		_running = false;

		al_debug("try to stop muxer....");

		al_debug("stop audio recorder...");
		if (_a_stream && _a_stream->a_src) {
			for (int i = 0; i < _a_stream->a_nb; i++) {
				_a_stream->a_src[i]->stop();
			}
		}

		al_debug("stop video recorder...");
		if (_v_stream && _v_stream->v_src)
			_v_stream->v_src->stop();

		al_debug("stop audio amix filter...");
		if (_a_stream && _a_stream->a_filter_amix)
			_a_stream->a_filter_amix->stop();

		al_debug("stop audio aresampler filter...");
		if (_a_stream && _a_stream->a_filter_aresample) {
			for (int i = 0; i < _a_stream->a_nb; i++) {
				_a_stream->a_filter_aresample[i]->stop();
			}
		}


		al_debug("stop video encoder...");
		if (_v_stream && _v_stream->v_enc)
			_v_stream->v_enc->stop();

		al_debug("stop audio encoder...");
		if (_a_stream) {
			if (_a_stream->a_enc)
				_a_stream->a_enc->stop();
		}

		al_debug("write file trailer...");
		if (_fmt_ctx)
			av_write_trailer(_fmt_ctx);//must write trailer ,otherwise file can not play

		al_debug("muxer stopped...");

		return AE_NO;
	}
+
	// Pause only gates the data callbacks (on_desktop_data/on_audio_data drop
	// frames while _paused); capture sources and encoders keep running.
	int muxer_ffmpeg::pause()
	{
		_paused = true;

		return 0;
	}

	// Resume delivering captured frames to the encoders.
	int muxer_ffmpeg::resume()
	{
		_paused = false;
		return 0;
	}
+
	// Desktop-capture callback: convert the captured frame to YUV420P via the
	// sws helper, feed it to the video encoder, and optionally forward the raw
	// YUV to the registered preview sink.
	void muxer_ffmpeg::on_desktop_data(AVFrame *frame)
	{
		if (_running == false || _paused == true || !_v_stream || !_v_stream->v_enc || !_v_stream->v_sws) {
			return;
		}

		int len = 0, ret = AE_NO;
		uint8_t *yuv_data = NULL;
		
		ret = _v_stream->v_sws->convert(frame, &yuv_data, &len);
		
		if (ret == AE_NO && yuv_data && len) {
			_v_stream->v_enc->put(yuv_data, len, frame);

			// Preview path: hand the converted YUV to the UI when enabled.
			if (_on_yuv_data && _preview_enabled == true)
				_on_yuv_data(yuv_data, len, frame->width, frame->height, 0);
		}
	}
+
	// Desktop-capture error callback: log only; recording is not torn down here.
	void muxer_ffmpeg::on_desktop_error(int error)
	{
		al_fatal("on desktop capture error:%d", error);
	}
+
+	int getPcmDB(const unsigned char *pcmdata, size_t size) {
+
+		int db = 0;
+		float value = 0;
+		double sum = 0;
+		double average = 0;
+		int bit_per_sample = 32;
+		int byte_per_sample = bit_per_sample / 8;
+		int channel_num = 2;
+
+		for (int i = 0; i < size; i += channel_num * byte_per_sample)
+		{
+			memcpy(&value, pcmdata + i, byte_per_sample);
+			sum += abs(value);
+		}
+		average = sum / (double)(size / byte_per_sample /channel_num);
+		if (average > 0)
+		{
+			db = (int)(20 * log10f(average));
+		}
+
+		al_debug("%d   %f     %f", db, average,sum);
+		return db;
+	}
+
+	static int pcm_fltp_db_count(AVFrame *frame, int channels)
+	{
+		int i = 0, ch = 0;
+		int ndb = 0;
+		float value = 0.;
+		float *ch_left = (float *)frame->data[0];
+		//float *ch_right = (float *)frame->data[1];
+		for (i = 0; i < frame->nb_samples; i++)
+		{
+			value += fabs(ch_left[i]);
+		}
+
+		value = value / frame->nb_samples;
+		if (0 != value)
+		{
+			ndb = (int)(20.0*log10((value / 1.0)));
+		}
+		else
+			ndb = -100;
+
+		return ndb;
+	}
+
	// Audio-capture callback for source 'index': route the frame to the mixer
	// (multi-source case) or to that source's resampler (single-source case).
	// NOTE(review): _a_stream is dereferenced without a null check; this relies
	// on the callback only being registered inside add_audio_stream - confirm.
	void muxer_ffmpeg::on_audio_data(AVFrame *frame, int index)
	{
		if (_running == false || _paused == true)
			return;

		if (_a_stream->a_filter_amix != nullptr)
			_a_stream->a_filter_amix->add_frame(frame, index);
		else if (_a_stream->a_filter_aresample != nullptr && _a_stream->a_filter_aresample[index] != nullptr) {
			_a_stream->a_filter_aresample[index]->add_frame(frame);
		}

		return;
	}
+
	// Audio-capture error callback: log only; recording continues.
	void muxer_ffmpeg::on_audio_error(int error, int index)
	{
		al_fatal("on audio capture error:%d with stream index:%d", error, index);
	}
+
	// Mixed-audio callback: accumulate the mixed frame's PCM into the staging
	// buffer a_resamples[0] and hand exactly one encoder frame at a time to the
	// AAC encoder.
	void muxer_ffmpeg::on_filter_amix_data(AVFrame * frame, int)
	{
		if (_running == false || !_a_stream->a_enc)
			return;



		AUDIO_SAMPLE *resamples = _a_stream->a_resamples[0];

		int copied_len = 0;
		// NOTE(review): the first call passes frame->linesize as the *output*
		// linesize parameter of av_samples_get_buffer_size, so it overwrites
		// frame->linesize[0]; only the second call's return value is used.
		int sample_len = av_samples_get_buffer_size(frame->linesize, frame->channels, frame->nb_samples, (AVSampleFormat)frame->format, 1);
		sample_len = av_samples_get_buffer_size(NULL, frame->channels, frame->nb_samples, (AVSampleFormat)frame->format, 1);

#ifdef _DEBUG
		//al_debug("dg:%d", pcm_fltp_db_count(frame, frame->channels));
#endif

		int remain_len = sample_len;

		//for data is planar,should copy data[0] data[1] to correct buff pos
		if (av_sample_fmt_is_planar((AVSampleFormat)frame->format) == 0) {
			// Interleaved: a single contiguous copy into the staging buffer.
			while (remain_len > 0) {
				//cache pcm
				copied_len = min(resamples->size - resamples->sample_in, remain_len);
				if (copied_len) {
					memcpy(resamples->buff + resamples->sample_in, frame->data[0] + sample_len - remain_len, copied_len);
					resamples->sample_in += copied_len;
					remain_len = remain_len - copied_len;
				}

				//got enough pcm to encoder,resample and mix
				if (resamples->sample_in == resamples->size) {
					_a_stream->a_enc->put(resamples->buff, resamples->size, frame);

					resamples->sample_in = 0;
				}
			}
		}
		else {//resample size is channels*frame->linesize[0],for 2 channels
			// Planar: plane 0 goes into the first half of the staging buffer,
			// plane 1 into the second half (hard-wired to 2 channels).
			while (remain_len > 0) {
				copied_len = min(resamples->size - resamples->sample_in, remain_len);
				if (copied_len) {
					memcpy(resamples->buff + resamples->sample_in / 2, frame->data[0] + (sample_len - remain_len) / 2, copied_len / 2);
					memcpy(resamples->buff + resamples->size / 2 + resamples->sample_in / 2, frame->data[1] + (sample_len - remain_len) / 2, copied_len / 2);
					resamples->sample_in += copied_len;
					remain_len = remain_len - copied_len;
				}

				if (resamples->sample_in == resamples->size) {
					_a_stream->a_enc->put(resamples->buff, resamples->size, frame);

					resamples->sample_in = 0;
				}
			}
		}
	}
+
	// Mixer error callback: log only.
	void muxer_ffmpeg::on_filter_amix_error(int error, int)
	{
		al_fatal("on filter amix audio error:%d", error);
	}
+
	// Resampled-audio callback (single-source path): accumulate resampled PCM
	// and hand exactly one encoder frame at a time to the AAC encoder.
	// NOTE(review): 'index' is ignored and data always goes into
	// a_resamples[0]; this is only safe while one resampler feeds the encoder
	// (a_nb < 2) - with the amix path active this callback is not used.
	void muxer_ffmpeg::on_filter_aresample_data(AVFrame * frame, int index)
	{
		if (_running == false || !_a_stream->a_enc)
			return;


		AUDIO_SAMPLE *resamples = _a_stream->a_resamples[0];

		int copied_len = 0;
		// NOTE(review): the first call writes into frame->linesize (output
		// param); only the second call's total-size return value is used.
		int sample_len = av_samples_get_buffer_size(frame->linesize, frame->channels, frame->nb_samples, (AVSampleFormat)frame->format, 1);
		sample_len = av_samples_get_buffer_size(NULL, frame->channels, frame->nb_samples, (AVSampleFormat)frame->format, 1);

		int remain_len = sample_len;

		//for data is planar,should copy data[0] data[1] to correct buff pos
		if (av_sample_fmt_is_planar((AVSampleFormat)frame->format) == 0) {
			// Interleaved: single contiguous copy into the staging buffer.
			while (remain_len > 0) {

				//cache pcm
				copied_len = min(resamples->size - resamples->sample_in, remain_len);
				if (copied_len) {
					memcpy(resamples->buff + resamples->sample_in, frame->data[0] + sample_len - remain_len, copied_len);
					resamples->sample_in += copied_len;
					remain_len = remain_len - copied_len;
				}

				//got enough pcm to encoder,resample and mix
				if (resamples->sample_in == resamples->size) {
					_a_stream->a_enc->put(resamples->buff, resamples->size, frame);

					resamples->sample_in = 0;
				}
			}
		}
		else {//resample size is channels*frame->linesize[0],for 2 channels
			// Planar: plane 0 fills the first half of the buffer, plane 1 the
			// second half (assumes exactly 2 channels).
			while (remain_len > 0) {
				copied_len = min(resamples->size - resamples->sample_in, remain_len);

				if (copied_len) {
					memcpy(resamples->buff + resamples->sample_in / 2, frame->data[0] + (sample_len - remain_len) / 2, copied_len / 2);
					memcpy(resamples->buff + resamples->size / 2 + resamples->sample_in / 2, frame->data[1] + (sample_len - remain_len) / 2, copied_len / 2);
					resamples->sample_in += copied_len;
					remain_len = remain_len - copied_len;
				}

				if (resamples->sample_in == resamples->size) {
					_a_stream->a_enc->put(resamples->buff, resamples->size, frame);

					resamples->sample_in = 0;
				}
			}
		}
	}
+
	// Resampler error callback: log only.
	void muxer_ffmpeg::on_filter_aresample_error(int error, int index)
	{
		al_fatal("on filter aresample[%d] audio error:%d", index, error);
	}
+
	// Encoded H.264 packet ready: hand it to the interleaved writer.
	void muxer_ffmpeg::on_enc_264_data(AVPacket *packet)
	{
		if (_running && _v_stream) {
			write_video(packet);
		}
	}

	// Video encoder error: log only.
	void muxer_ffmpeg::on_enc_264_error(int error)
	{
		al_fatal("on desktop encode error:%d", error);
	}

	// Encoded AAC packet ready: hand it to the interleaved writer.
	void muxer_ffmpeg::on_enc_aac_data(AVPacket *packet)
	{
		if (_running && _a_stream) {
			write_audio(packet);
		}
	}

	// Audio encoder error: log only.
	void muxer_ffmpeg::on_enc_aac_error(int error)
	{
		al_fatal("on audio encode error:%d", error);
	}
+
+	int muxer_ffmpeg::alloc_oc(const char * output_file, const MUX_SETTING_T & setting)
+	{
+		_output_file = std::string(output_file);
+
+		int error = AE_NO;
+		int ret = 0;
+
+		do {
+			ret = avformat_alloc_output_context2(&_fmt_ctx, NULL, NULL, output_file);
+			if (ret < 0 || !_fmt_ctx) {
+				error = AE_FFMPEG_ALLOC_CONTEXT_FAILED;
+				break;
+			}
+
+			_fmt = _fmt_ctx->oformat;
+		} while (0);
+
+		return error;
+	}
+
	// Create the video leg: hook the desktop capture callbacks, build the video
	// encoder (via the factory) and the sws converter, then add an AVStream to
	// the output context and fill its (deprecated) codec context by hand.
	// On failure, partially-built objects are released by the caller's cleanup().
	int muxer_ffmpeg::add_video_stream(const MUX_SETTING_T & setting, record_desktop * source_desktop)
	{
		int error = AE_NO;
		int ret = 0;

		_v_stream = new MUX_STREAM();
		memset(_v_stream, 0, sizeof(MUX_STREAM));

		_v_stream->v_src = source_desktop;

		_v_stream->pre_pts = -1;
		
		// Route captured frames/errors into this muxer.
		_v_stream->v_src->registe_cb(
			std::bind(&muxer_ffmpeg::on_desktop_data, this, std::placeholders::_1),
			std::bind(&muxer_ffmpeg::on_desktop_error, this, std::placeholders::_1)
		);

		RECORD_DESKTOP_RECT v_rect = _v_stream->v_src->get_rect();

		do {
			error = encoder_video_new(setting.v_encoder_id, &_v_stream->v_enc);
			if (error != AE_NO)
				break;

			error = _v_stream->v_enc->init(setting.v_out_width, 
				setting.v_out_height, 
				setting.v_frame_rate,
				setting.v_bit_rate, 
				setting.v_qb
			);
			if (error != AE_NO)
				break;

			_v_stream->v_enc->registe_cb(
				std::bind(&muxer_ffmpeg::on_enc_264_data, this, std::placeholders::_1),
				std::bind(&muxer_ffmpeg::on_enc_264_error, this, std::placeholders::_1)
			);

			// Converter from the capture pixel format/size to YUV420P at the
			// configured output size.
			_v_stream->v_sws = new sws_helper();
			error = _v_stream->v_sws->init(
				_v_stream->v_src->get_pixel_fmt(),
				v_rect.right - v_rect.left,
				v_rect.bottom - v_rect.top,
				AV_PIX_FMT_YUV420P,
				setting.v_out_width,
				setting.v_out_height
			);
			if (error != AE_NO)
				break;


			AVCodec *codec = avcodec_find_encoder(_v_stream->v_enc->get_codec_id());
			if (!codec) {
				error = AE_FFMPEG_FIND_ENCODER_FAILED;
				break;
			}

			_fmt->video_codec = codec->id;

			AVStream *st = avformat_new_stream(_fmt_ctx, codec);
			if (!st) {
				error = AE_FFMPEG_NEW_STREAM_FAILED;
				break;
			}

			// NOTE(review): st->codec is the deprecated AVStream codec context;
			// newer ffmpeg would use st->codecpar instead.
			st->codec->codec_id = AV_CODEC_ID_H264;
			st->codec->bit_rate_tolerance = setting.v_bit_rate;
			st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
			st->codec->time_base.den = setting.v_frame_rate;
			st->codec->time_base.num = 1;
			st->codec->pix_fmt = AV_PIX_FMT_YUV420P;

			st->codec->coded_width = setting.v_out_width;
			st->codec->coded_height = setting.v_out_height;
			st->codec->width = setting.v_out_width;
			st->codec->height = setting.v_out_height;
			st->codec->max_b_frames = 0;//NO B Frame
			st->time_base = { 1,90000 };//fixed?
			st->avg_frame_rate = av_inv_q(st->codec->time_base);

			if (_fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {//without this,normal player can not play,extradata will write with avformat_write_header
				st->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

				// SPS/PPS from the encoder become the stream's global header.
				st->codec->extradata_size = _v_stream->v_enc->get_extradata_size();// +AV_INPUT_BUFFER_PADDING_SIZE;
				st->codec->extradata = (uint8_t*)av_memdup(_v_stream->v_enc->get_extradata(), _v_stream->v_enc->get_extradata_size());
			}

			_v_stream->st = st;

			_v_stream->setting = setting;
			//_v_stream->filter = av_bitstream_filter_init("h264_mp4toannexb");
		} while (0);

		return error;
	}
+
	// Create the audio leg: an AAC encoder, one resampler + staging buffers per
	// source, an amix filter when there are two or more sources, and finally an
	// AVStream in the output context.
	// NOTE(review): the amix initialization below indexes a_src[0] and a_src[1]
	// only - the mixing path is hard-wired to exactly two sources; a_nb > 2
	// would silently ignore the extra sources.
	int muxer_ffmpeg::add_audio_stream(const MUX_SETTING_T & setting, record_audio ** source_audios, const int source_audios_nb)
	{
		int error = AE_NO;
		int ret = 0;

		_a_stream = new MUX_STREAM();
		memset(_a_stream, 0, sizeof(MUX_STREAM));

		_a_stream->a_nb = source_audios_nb;
		_a_stream->a_filter_aresample = new filter_aresample*[_a_stream->a_nb];
		_a_stream->a_resamples = new AUDIO_SAMPLE*[_a_stream->a_nb];
		_a_stream->a_samples = new AUDIO_SAMPLE*[_a_stream->a_nb];
		_a_stream->a_src = new record_audio*[_a_stream->a_nb];
		_a_stream->pre_pts = -1;


		do {
			_a_stream->a_enc = new encoder_aac();
			error = _a_stream->a_enc->init(
				setting.a_nb_channel,
				setting.a_sample_rate,
				setting.a_sample_fmt,
				setting.a_bit_rate
			);
			if (error != AE_NO)
				break;

			_a_stream->a_enc->registe_cb(
				std::bind(&muxer_ffmpeg::on_enc_aac_data, this, std::placeholders::_1),
				std::bind(&muxer_ffmpeg::on_enc_aac_error, this, std::placeholders::_1)
			);

			// Per-source setup: capture callbacks, resampler (source format ->
			// encoder format) and staging buffers sized to one encoder frame.
			for (int i = 0; i < _a_stream->a_nb; i++) {

				_a_stream->a_src[i] = source_audios[i];
				_a_stream->a_src[i]->registe_cb(
					std::bind(&muxer_ffmpeg::on_audio_data, this, std::placeholders::_1, std::placeholders::_2),
					std::bind(&muxer_ffmpeg::on_audio_error, this, std::placeholders::_1, std::placeholders::_2),
					i
				);

				_a_stream->a_filter_aresample[i] = new filter_aresample();
				_a_stream->a_resamples[i] = new AUDIO_SAMPLE({ NULL,0,0 });
				
				
				FILTER_CTX ctx_in = { 0 }, ctx_out = { 0 };
				ctx_in.time_base = _a_stream->a_src[i]->get_time_base();
				ctx_in.channel_layout = av_get_default_channel_layout(_a_stream->a_src[i]->get_channel_num());
				ctx_in.nb_channel = _a_stream->a_src[i]->get_channel_num();
				ctx_in.sample_fmt = _a_stream->a_src[i]->get_fmt();
				ctx_in.sample_rate = _a_stream->a_src[i]->get_sample_rate();

				ctx_out.time_base = { 1,AV_TIME_BASE };
				ctx_out.channel_layout = av_get_default_channel_layout(setting.a_nb_channel);
				ctx_out.nb_channel = setting.a_nb_channel;
				ctx_out.sample_fmt = setting.a_sample_fmt;
				ctx_out.sample_rate = setting.a_sample_rate;

				_a_stream->a_filter_aresample[i]->init(ctx_in, ctx_out, i);

				_a_stream->a_filter_aresample[i]->registe_cb(
					std::bind(&muxer_ffmpeg::on_filter_aresample_data, this, std::placeholders::_1, std::placeholders::_2),
					std::bind(&muxer_ffmpeg::on_filter_aresample_error, this, std::placeholders::_1, std::placeholders::_2));

				// One encoder frame of output-format PCM ...
				_a_stream->a_resamples[i]->size = av_samples_get_buffer_size(
					NULL, setting.a_nb_channel, _a_stream->a_enc->get_nb_samples(), setting.a_sample_fmt, 1);
				_a_stream->a_resamples[i]->buff = new uint8_t[_a_stream->a_resamples[i]->size];
				// ... and one encoder frame of source-format PCM.
				_a_stream->a_samples[i] = new AUDIO_SAMPLE({ NULL,0,0 });
				_a_stream->a_samples[i]->size = av_samples_get_buffer_size(
					NULL, _a_stream->a_src[i]->get_channel_num(), _a_stream->a_enc->get_nb_samples(), _a_stream->a_src[i]->get_fmt(), 1);
				_a_stream->a_samples[i]->buff = new uint8_t[_a_stream->a_samples[i]->size];
			}

			// Two or more sources: mix them with the amix filter (inputs are the
			// first two sources' formats, output is the encoder format).
			if (_a_stream->a_nb >= 2) {
				_a_stream->a_filter_amix = new am::filter_amix();
				error = _a_stream->a_filter_amix->init(
				{
					NULL,NULL,
					_a_stream->a_src[0]->get_time_base(),
					_a_stream->a_src[0]->get_sample_rate(),
					_a_stream->a_src[0]->get_fmt(),
					_a_stream->a_src[0]->get_channel_num(),
					av_get_default_channel_layout(_a_stream->a_src[0]->get_channel_num())
				},
				{
					NULL,NULL,
					_a_stream->a_src[1]->get_time_base(),
					_a_stream->a_src[1]->get_sample_rate(),
					_a_stream->a_src[1]->get_fmt(),
					_a_stream->a_src[1]->get_channel_num(),
					av_get_default_channel_layout(_a_stream->a_src[1]->get_channel_num())
				},
				{
					NULL,NULL,
					{ 1,AV_TIME_BASE },
					setting.a_sample_rate,
					setting.a_sample_fmt,
					setting.a_nb_channel,
					av_get_default_channel_layout(setting.a_nb_channel)
				}
				);

				if (error != AE_NO) {
					break;
				}

				_a_stream->a_filter_amix->registe_cb(
					std::bind(&muxer_ffmpeg::on_filter_amix_data, this, std::placeholders::_1, std::placeholders::_2),
					std::bind(&muxer_ffmpeg::on_filter_amix_error, this, std::placeholders::_1, std::placeholders::_2)
				);
			}

			AVCodec *codec = avcodec_find_encoder(_a_stream->a_enc->get_codec_id());
			if (!codec) {
				error = AE_FFMPEG_FIND_ENCODER_FAILED;
				break;
			}

			_fmt->audio_codec = _a_stream->a_enc->get_codec_id();

			AVStream *st = avformat_new_stream(_fmt_ctx, codec);
			if (!st) {
				error = AE_FFMPEG_NEW_STREAM_FAILED;
				break;
			}

			av_dict_set(&st->metadata, "title", "Track1", 0);

			st->time_base = { 1,setting.a_sample_rate };

			// NOTE(review): st->codec is the deprecated codec context API.
			st->codec->bit_rate = setting.a_bit_rate;
			st->codec->channels = setting.a_nb_channel;
			st->codec->sample_rate = setting.a_sample_rate;
			st->codec->sample_fmt = setting.a_sample_fmt;
			st->codec->time_base = { 1,setting.a_sample_rate };
			st->codec->channel_layout = av_get_default_channel_layout(setting.a_nb_channel);

			if (_fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {//without this,normal player can not play
				st->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

				st->codec->extradata_size = _a_stream->a_enc->get_extradata_size();// +AV_INPUT_BUFFER_PADDING_SIZE;
				st->codec->extradata = (uint8_t*)av_memdup(_a_stream->a_enc->get_extradata(), _a_stream->a_enc->get_extradata_size());
			}

			_a_stream->st = st;		

			_a_stream->setting = setting;
			_a_stream->filter = av_bitstream_filter_init("aac_adtstoasc");

		} while (0);

		return error;
	}
+
+	int muxer_ffmpeg::open_output(const char * output_file, const MUX_SETTING_T & setting)
+	{
+		int error = AE_NO;
+		int ret = 0;
+
+		do {
+			if (!(_fmt->flags & AVFMT_NOFILE)) {
+				ret = avio_open(&_fmt_ctx->pb, output_file, AVIO_FLAG_WRITE);
+				if (ret < 0) {
+					error = AE_FFMPEG_OPEN_IO_FAILED;
+					break;
+				}
+			}
+
+			AVDictionary* opt = NULL;
+			av_dict_set_int(&opt, "video_track_timescale", _v_stream->setting.v_frame_rate, 0);
+
+			//ret = avformat_write_header(_fmt_ctx, &opt);//no need to set this
+			ret = avformat_write_header(_fmt_ctx, NULL);
+
+			av_dict_free(&opt);
+
+			if (ret < 0) {
+				error = AE_FFMPEG_WRITE_HEADER_FAILED;
+				break;
+			}
+		} while (0);
+
+		return error;
+	}
+
+	void muxer_ffmpeg::cleanup_video()
+	{
+		if (!_v_stream)
+			return;
+
+		if (_v_stream->v_enc)
+			delete _v_stream->v_enc;
+
+		if (_v_stream->v_sws)
+			delete _v_stream->v_sws;
+
+		delete _v_stream;
+
+		_v_stream = nullptr;
+	}
+
	// Free the audio pipeline: encoder, mixer, per-source resamplers and
	// staging buffers, then the stream record itself.
	// Note: the capture sources (a_src) are owned by the caller of init() and
	// are deliberately not deleted here.
	void muxer_ffmpeg::cleanup_audio()
	{
		if (!_a_stream)
			return;

		if (_a_stream->a_enc)
			delete _a_stream->a_enc;

		if (_a_stream->a_filter_amix)
			delete _a_stream->a_filter_amix;

		if (_a_stream->a_nb) {
			for (int i = 0; i < _a_stream->a_nb; i++) {
				if (_a_stream->a_filter_aresample && _a_stream->a_filter_aresample[i])
					delete _a_stream->a_filter_aresample[i];

				if (_a_stream->a_samples && _a_stream->a_samples[i]) {
					delete[] _a_stream->a_samples[i]->buff;
					delete _a_stream->a_samples[i];
				}

				if (_a_stream->a_resamples && _a_stream->a_resamples[i]) {
					delete[] _a_stream->a_resamples[i]->buff;
					delete _a_stream->a_resamples[i];
				}
			}

			if (_a_stream->a_filter_aresample)
				delete[] _a_stream->a_filter_aresample;

			if (_a_stream->a_samples)
				delete[] _a_stream->a_samples;

			if (_a_stream->a_resamples)
				delete[] _a_stream->a_resamples;
		}

		delete _a_stream;

		_a_stream = nullptr;
	}
+
+	void muxer_ffmpeg::cleanup()
+	{
+		cleanup_video();
+		cleanup_audio();
+
+		if (_fmt && !(_fmt->flags & AVFMT_NOFILE))
+			avio_closep(&_fmt_ctx->pb);
+
+		if (_fmt_ctx) {
+			avformat_free_context(_fmt_ctx);
+		}
+
+		_fmt_ctx = NULL;
+		_fmt = NULL;
+
+		_inited = false;
+	}
+
	// Current monotonic wall clock in microseconds (same clock as _base_time).
	// NOTE(review): av_gettime_relative is already safe to call concurrently;
	// the _time_mutex lock appears redundant - confirm before removing.
	uint64_t muxer_ffmpeg::get_current_time()
	{
		std::lock_guard<std::mutex> lock(_time_mutex);

		return av_gettime_relative();
	}
+
	// Write one encoded video packet: rescale its timestamps from the capture
	// timebase to microseconds, subtract the shared _base_time so audio and
	// video start from the same origin, then rescale into the stream timebase
	// and hand the packet to ffmpeg's interleaving writer.
	int muxer_ffmpeg::write_video(AVPacket *packet)
	{
		//must lock here,coz av_interleaved_write_frame will push packet into a queue,and is not thread safe
		std::lock_guard<std::mutex> lock(_mutex);

		packet->stream_index = _v_stream->st->index;

		/*packet->pts = av_rescale_q_rnd(packet->pts, 
			_v_stream->v_src->get_time_base(), 
			{ 1,AV_TIME_BASE }, 
			(AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));

		// make audio and video use one clock
		if (_v_stream->pre_pts == (uint64_t)-1) {
			_v_stream->pre_pts = packet->pts;
		}*/

		// scale ts with timebase of base_time
		av_packet_rescale_ts(packet, _v_stream->v_src->get_time_base(), { 1,AV_TIME_BASE });

		// make audio and video use one clock
		packet->pts = packet->pts - _base_time;
		packet->dts = packet->pts;//make sure that dts is equal to pts

		av_packet_rescale_ts(packet, { 1,AV_TIME_BASE }, _v_stream->st->time_base);


		// NOTE(review): logs every single packet - noisy outside of debugging.
		al_debug("V:%lld", packet->pts);
		
		av_assert0(packet->data != NULL);

		int ret = av_interleaved_write_frame(_fmt_ctx, packet);//no need to unref packet,this will be auto unref

		if (ret != 0) {
			al_fatal("write video frame error:%d", ret);
		}

		return ret;
	}
+
	// Write one encoded audio packet: pick the source timebase from whichever
	// filter feeds the encoder (amix or the first resampler), align against the
	// shared _base_time, rescale into the stream timebase and write interleaved.
	int muxer_ffmpeg::write_audio(AVPacket *packet)
	{
		std::lock_guard<std::mutex> lock(_mutex);

		packet->stream_index = _a_stream->st->index;

		AVRational src_timebase = { 1,1 };

		if (_a_stream->a_filter_amix != nullptr) {
			src_timebase = _a_stream->a_filter_amix->get_time_base();
		}
		else {
			src_timebase = _a_stream->a_filter_aresample[0]->get_time_base();
		}

		/*packet->pts = av_rescale_q_rnd(packet->pts, 
			src_timebase, 
			{ 1,AV_TIME_BASE }, 
			(AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));

		
		if (_v_stream->pre_pts == (uint64_t)-1) {
			_v_stream->pre_pts = packet->pts;
		}*/

		// scale ts with timebase of base_time
		av_packet_rescale_ts(packet, src_timebase, { 1,AV_TIME_BASE });

		// make audio and video use one clock
		packet->pts = packet->pts - _base_time;
		packet->dts = packet->pts;//make sure that dts is equal to pts

		av_packet_rescale_ts(packet, { 1,AV_TIME_BASE }, _a_stream->st->time_base);
		

		// NOTE(review): logs every single packet - noisy outside of debugging.
		al_debug("A:%lld %lld", packet->pts, packet->dts);

		av_assert0(packet->data != NULL);

		int ret = av_interleaved_write_frame(_fmt_ctx, packet);//no need to unref packet,this will be auto unref
		if (ret != 0) {
			al_fatal("write audio frame error:%d", ret);
		}

		return ret;
	}
+}

+ 100 - 0
libs/Recorder/muxer_ffmpeg.h

@@ -0,0 +1,100 @@
+#ifndef MUXER_FFMPEG
+#define MUXER_FFMPEG
+
+#include <thread>
+#include <list>
+#include <functional>
+#include <math.h>
+#include <mutex>
+
+#include "muxer_file.h"
+
+#include "headers_ffmpeg.h"
+
+namespace am {
+
	// FFmpeg-backed implementation of muxer_file: wires a desktop-capture video
	// source and N audio-capture sources through encoders/filters into a single
	// container file (format inferred from the output file's extension).
	class muxer_ffmpeg : public muxer_file
	{
	public:
		muxer_ffmpeg();
		~muxer_ffmpeg();

		// Build the pipeline; must succeed before start(). Audio sources are
		// borrowed (not owned). Returns an AE_* error code.
		int init(
			const char *output_file,
			record_desktop *source_desktop,
			record_audio ** source_audios,
			const int source_audios_nb,
			const MUX_SETTING_T &setting
		);

		int start();
		int stop();

		// pause/resume only gate frame delivery; capture keeps running.
		int pause();
		int resume();

	private:
		// --- capture callbacks ---
		void on_desktop_data(AVFrame *frame);

		void on_desktop_error(int error);

		void on_audio_data(AVFrame *frame, int index);

		void on_audio_error(int error, int index);

		// --- filter callbacks (amix for 2+ sources, aresample otherwise) ---
		void on_filter_amix_data(AVFrame *frame, int index);

		void on_filter_amix_error(int error, int index);

		void on_filter_aresample_data(AVFrame * frame,int index);

		void on_filter_aresample_error(int error, int index);


		// --- encoder callbacks: encoded packets go to write_video/write_audio ---
		void on_enc_264_data(AVPacket *packet);

		void on_enc_264_error(int error);

		void on_enc_aac_data(AVPacket *packet);

		void on_enc_aac_error(int error);


		// --- pipeline construction helpers (called from init) ---
		int alloc_oc(const char *output_file, const MUX_SETTING_T &setting);

		int add_video_stream(const MUX_SETTING_T &setting, record_desktop *source_desktop);

		int add_audio_stream(const MUX_SETTING_T &setting, record_audio ** source_audios, const int source_audios_nb);

		int open_output(const char *output_file, const MUX_SETTING_T &setting);

		void cleanup_video();
		void cleanup_audio();
		void cleanup();

		uint64_t get_current_time();

		// Timestamp-align and interleave-write one encoded packet.
		int write_video(AVPacket *packet);

		int write_audio(AVPacket *packet);

	private:
		struct MUX_STREAM_T *_v_stream, *_a_stream;  // per-media pipeline state

		AVOutputFormat *_fmt;       // owned by _fmt_ctx
		AVFormatContext *_fmt_ctx;  // output container context

		int64_t _base_time;         // shared clock origin (us), set in start()

		char ff_error[4096];        // scratch for ffmpeg error strings

		std::mutex _mutex;          // serializes start/stop and packet writes
		std::mutex _time_mutex;     // guards get_current_time()
	};
+}
+
+
+
+#endif

+ 26 - 0
libs/Recorder/muxer_file.cpp

@@ -0,0 +1,26 @@
+#include "muxer_file.h"
+
namespace am {

	// Base-class construction: put every flag/callback into a known idle state;
	// concrete muxers (e.g. muxer_ffmpeg) do the real setup in init().
	muxer_file::muxer_file()
	{
		_on_yuv_data = nullptr;

		_inited = false;
		_running = false;
		_paused = false;

		_have_a = false;
		_have_v = false;

		_preview_enabled = false;

		_output_file = "";
	}


	muxer_file::~muxer_file()
	{
	}

}

+ 59 - 0
libs/Recorder/muxer_file.h

@@ -0,0 +1,59 @@
+#pragma once
+
+#include <functional>
+#include <atomic>
+#include <string>
+
+namespace am {
+
+	class record_audio;
+	class record_desktop;
+
+	struct MUX_STREAM_T;
+	struct MUX_SETTING_T;
+
+	typedef std::function<void(const uint8_t *data, int size, int width, int height,int type)> cb_yuv_data;
+
	// Abstract base for file muxers: owns the common lifecycle flags and the
	// optional YUV preview callback; concrete subclasses implement the pipeline.
	class muxer_file
	{
	public:
		muxer_file();
		virtual ~muxer_file();

		// Build the pipeline for one recording; audio sources are borrowed.
		virtual int init(
			const char *output_file,
			record_desktop *source_desktop,
			record_audio ** source_audios,
			const int source_audios_nb,
			const MUX_SETTING_T &setting
		) = 0;

		virtual int start() = 0;
		virtual int stop() = 0;

		virtual int pause() = 0;
		virtual int resume() = 0;

		// Register the sink that receives converted YUV frames for preview.
		inline void registe_yuv_data(cb_yuv_data on_yuv_data) {
			_on_yuv_data = on_yuv_data;
		}

		inline void set_preview_enabled(bool enable) {
			_preview_enabled = enable;
		}

	protected:
		cb_yuv_data _on_yuv_data;   // preview sink, may be null

		std::atomic_bool _inited;
		std::atomic_bool _running;
		std::atomic_bool _paused;
		std::atomic_bool _preview_enabled;

		bool _have_v, _have_a;      // which media legs were created

		std::string _output_file;
	};
+
+
+}

+ 28 - 0
libs/Recorder/record_audio.cpp

@@ -0,0 +1,28 @@
+#include "record_audio.h"
+
namespace am {
	// Base-class defaults for all audio recorders: 48 kHz stereo 32-bit float
	// PCM. Concrete recorders overwrite these in init() with device values.
	record_audio::record_audio()
	{
		_running = false;
		_inited = false;
		_paused = false;

		_sample_rate = 48000;
		_bit_rate = 3072000;
		_channel_num = 2;
		_channel_layout = av_get_default_channel_layout(_channel_num);
		// 3072000 / 48000 / 2 = 32 bits, matching AV_SAMPLE_FMT_FLT below.
		_bit_per_sample = _bit_rate / _sample_rate / _channel_num;
		_fmt = AV_SAMPLE_FMT_FLT;
		_on_data = nullptr;
		_on_error = nullptr;

		_device_name = "";
		_device_id = "";
		_is_input = false;
	}

	record_audio::~record_audio()
	{

	}
}

+ 97 - 0
libs/Recorder/record_audio.h

@@ -0,0 +1,97 @@
+#ifndef RECORD_AUDIO
+#define RECORD_AUDIO
+
+#include "record_audio_define.h"
+
+#include "headers_ffmpeg.h"
+
+#include <atomic>
+#include <thread>
+#include <functional>
+#include <string>
+
+namespace am {
+	typedef std::function<void(AVFrame *packet, int index)> cb_audio_data;
+	typedef std::function<void(int,int)> cb_audio_error;
+
	// Abstract base for audio capture backends (wave/wasapi/dshow/ffmpeg).
	// Subclasses deliver decoded AVFrames through the registered data callback
	// together with the index passed to registe_cb().
	class record_audio
	{
	public:
		record_audio();
		virtual ~record_audio();

		// device_name/device_id identify the capture device; is_input selects
		// microphone (true) vs loopback/render capture (false).
		virtual int init(const std::string &device_name,
			const std::string &device_id,
			bool is_input) = 0;

		virtual int start() = 0;
		
		virtual int pause() = 0;
		
		virtual int resume() = 0;
		
		virtual int stop() = 0;

		// Timebase of the pts values stamped on delivered frames.
		virtual const AVRational get_time_base() = 0;

		virtual int64_t get_start_time() = 0;

	public:
		inline bool is_recording() { return _running; }
		
		inline int get_sample_rate() { return _sample_rate; }
		
		inline int get_bit_rate() { return _bit_rate; }

		inline int get_bit_per_sample() { return _bit_per_sample; }

		inline int get_channel_num() { return _channel_num; }

		// NOTE(review): this getter's name shadows FFmpeg's global
		// av_get_channel_layout() function - confusing; consider renaming in a
		// future interface revision.
		inline int64_t av_get_channel_layout() { return _channel_layout; }
		
		inline AVSampleFormat get_fmt() { return _fmt; }

		inline const std::string & get_device_name() { return _device_name; }

		// Register frame/error callbacks; cb_extra_index is echoed back as the
		// 'index' argument so one sink can serve multiple sources.
		inline void registe_cb(
			cb_audio_data on_data,
			cb_audio_error on_error,
			int cb_extra_index = -1) {
			_on_data = on_data;
			_on_error = on_error;
			_cb_extra_index = cb_extra_index;
		}

	protected:
		std::atomic_bool _running;
		std::atomic_bool _inited;
		std::atomic_bool _paused;

		std::thread _thread;        // capture worker thread

		int _sample_rate;
		
		int _bit_rate;

		int _channel_num;

		int64_t _channel_layout;

		int _bit_per_sample;
		
		AVSampleFormat _fmt;
		
		std::string _device_name;
		std::string _device_id;

		bool _is_input;             // input (mic) vs loopback capture

		cb_audio_error _on_error;
		
		cb_audio_data _on_data;

		int _cb_extra_index;        // echoed as 'index' in the callbacks
	};
+}
+
+#endif // !RECORD_AUDIO

+ 18 - 0
libs/Recorder/record_audio_define.h

@@ -0,0 +1,18 @@
+#ifndef RECORD_AUDIO_DEFINE
+#define RECORD_AUDIO_DEFINE
+
+#include <stdint.h>
+
+/**
+* Record type
+*
+*/
// Selects which capture backend a record_audio factory should create.
typedef enum {
	AT_AUDIO_NO = 0,  ///< no/unspecified backend
	AT_AUDIO_WAVE,    ///< wave api
	AT_AUDIO_WAS,     ///< wasapi(core audio)
	AT_AUDIO_DSHOW,   ///< direct show api
	AT_AUDIO_FFMPEG,  ///< ffmpeg api
}RECORD_AUDIO_TYPES;
+
+#endif // !RECORD_AUDIO_DEFINE

+ 244 - 0
libs/Recorder/record_audio_dshow.cpp

@@ -0,0 +1,244 @@
+#include "record_audio_dshow.h"
+
+#include "log_helper.h"
+#include "error_define.h"
+
+namespace am {
+
+	record_audio_dshow::record_audio_dshow()
+	{
+		av_register_all();
+		avdevice_register_all();
+
+		_fmt_ctx = NULL;
+		_input_fmt = NULL;
+		_codec_ctx = NULL;
+		_codec = NULL;
+
+		_stream_index = -1;
+	}
+
+
	record_audio_dshow::~record_audio_dshow()
	{
		// Stop the capture thread first, then release all ffmpeg objects.
		stop();
		cleanup();
	}
+
+	int record_audio_dshow::init(const std::string & device_name, const std::string &device_id, bool is_input)
+	{
+		int error = AE_NO;
+		int ret = 0;
+
+		if (_inited == true)
+			return error;
+
+		do {
+
+			_device_name = device_name;
+			_device_id = device_id;
+			_is_input = is_input;
+
+			_input_fmt = av_find_input_format("dshow");
+			if (!_input_fmt) {
+				error = AE_FFMPEG_FIND_INPUT_FMT_FAILED;
+				break;
+			}
+
+			_fmt_ctx = avformat_alloc_context();
+			ret = avformat_open_input(&_fmt_ctx, _device_name.c_str(), _input_fmt, NULL);
+			if (ret != 0) {
+				error = AE_FFMPEG_OPEN_INPUT_FAILED;
+				break;
+			}
+
+
+			ret = avformat_find_stream_info(_fmt_ctx, NULL);
+			if (ret < 0) {
+				error = AE_FFMPEG_FIND_STREAM_FAILED;
+				break;
+			}
+
+			int stream_index = -1;
+			for (int i = 0; i < _fmt_ctx->nb_streams; i++) {
+				if (_fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+					stream_index = i;
+					break;
+				}
+			}
+
+			if (stream_index == -1) {
+				error = AE_FFMPEG_FIND_STREAM_FAILED;
+				break;
+			}
+
+			_stream_index = stream_index;
+			_codec_ctx = _fmt_ctx->streams[stream_index]->codec;
+			_codec = avcodec_find_decoder(_codec_ctx->codec_id);
+			if (_codec == NULL) {
+				error = AE_FFMPEG_FIND_DECODER_FAILED;
+				break;
+			}
+
+			ret = avcodec_open2(_codec_ctx, _codec, NULL);
+			if (ret != 0) {
+				error = AE_FFMPEG_OPEN_CODEC_FAILED;
+				break;
+			}
+
+			_inited = true;
+
+			_sample_rate = _codec_ctx->sample_rate;
+			_bit_rate = _codec_ctx->bit_rate;
+			_bit_per_sample = _codec_ctx->bits_per_coded_sample;
+			_channel_num = _codec_ctx->channels;
+			_fmt = _codec_ctx->sample_fmt;
+
+			_inited = true;
+		} while (0);
+
+		if (error != AE_NO) {
+			al_debug("%s,error:%d", err2str(error), ret);
+			cleanup();
+		}
+
+		return error;
+	}
+
+	int record_audio_dshow::start()
+	{
+		if (_running == true) {
+			al_warn("record audio dshow is already running");
+			return AE_NO;
+		}
+
+		if (_inited == false) {
+			return AE_NEED_INIT;
+		}
+
+
+		_running = true;
+		_thread = std::thread(std::bind(&record_audio_dshow::record_loop, this));
+
+		return AE_NO;
+	}
+
	int record_audio_dshow::pause()
	{
		// NOTE(review): pause is not implemented for the dshow backend; the
		// capture thread keeps running and callbacks keep firing.
		return 0;
	}
+
	int record_audio_dshow::resume()
	{
		// NOTE(review): resume is a no-op, matching the unimplemented pause().
		return 0;
	}
+
+	int record_audio_dshow::stop()
+	{
+		_running = false;
+		if (_thread.joinable())
+			_thread.join();
+
+		return AE_NO;
+	}
+
+	const AVRational record_audio_dshow::get_time_base()
+	{
+		if (_inited && _fmt_ctx && _stream_index != -1) {
+			return _fmt_ctx->streams[_stream_index]->time_base;
+		}
+		else {
+			return{ 1,AV_TIME_BASE };
+		}
+	}
+
+	int64_t record_audio_dshow::get_start_time()
+	{
+		return _fmt_ctx->streams[_stream_index]->start_time;
+	}
+
	// Feed one packet (or NULL to flush) into the decoder and forward every
	// decoded frame to the registered data callback.
	int record_audio_dshow::decode(AVFrame * frame, AVPacket * packet)
	{
		int ret = avcodec_send_packet(_codec_ctx, packet);
		if (ret < 0) {
			al_error("avcodec_send_packet failed:%d", ret);

			return AE_FFMPEG_DECODE_FRAME_FAILED;
		}

		// One packet may yield zero or more frames; drain them all.
		while (ret >= 0)
		{
			ret = avcodec_receive_frame(_codec_ctx, frame);
			// EAGAIN: decoder needs more input; EOF: fully flushed.
			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
				break;
			}

			if (ret < 0) {
				return AE_FFMPEG_READ_FRAME_FAILED;
			}

			if (ret == 0 && _on_data)
				_on_data(frame, _cb_extra_index);

			av_frame_unref(frame);//need to do this? avcodec_receive_frame said will call unref before receive
		}

		return AE_NO;
	}
+
	// Capture thread body: pull packets from the device until stop() clears
	// _running or a fatal read/decode error occurs, then flush the decoder.
	void record_audio_dshow::record_loop()
	{
		int ret = 0;

		AVPacket *packet = av_packet_alloc();

		AVFrame *frame = av_frame_alloc();

		while (_running == true) {
			av_init_packet(packet);

			// Blocking read of the next device packet.
			ret = av_read_frame(_fmt_ctx, packet);

			if (ret < 0) {
				if (_on_error) _on_error(AE_FFMPEG_READ_FRAME_FAILED, _cb_extra_index);
				break;
			}

			// Only decode packets from the audio stream chosen in init().
			if (packet->stream_index == _stream_index) {
				ret = decode(frame, packet);
				if (ret != AE_NO) {
					if (_on_error) _on_error(AE_FFMPEG_DECODE_FRAME_FAILED, _cb_extra_index);

					al_fatal("decode pcm packet failed:%d", ret);
					break;
				}
			}

			av_packet_unref(packet);
		}

		//flush packets left in the decoder (a NULL packet drains it)
		decode(frame, NULL);

		av_packet_free(&packet);
		av_frame_free(&frame);
	}
+
+	void record_audio_dshow::cleanup()
+	{
+		if (_codec_ctx)
+			avcodec_close(_codec_ctx);
+
+		if (_fmt_ctx)
+			avformat_close_input(&_fmt_ctx);
+
+		_fmt_ctx = NULL;
+		_input_fmt = NULL;
+		_codec_ctx = NULL;
+		_codec = NULL;
+
+		_stream_index = -1;
+		_inited = false;
+	}
+
+}

+ 43 - 0
libs/Recorder/record_audio_dshow.h

@@ -0,0 +1,43 @@
+#pragma once
+
+#include <string>
+
+#include "record_audio.h"
+
+namespace am {
+
	// Audio capture backend built on ffmpeg's DirectShow ("dshow") input
	// device. Decodes the device stream and delivers AVFrames through the
	// base-class data callback.
	class record_audio_dshow :public record_audio
	{
	public:
		record_audio_dshow();
		~record_audio_dshow();

		virtual int init(const std::string &device_name,
			const std::string &device_id,
			bool is_input);

		virtual int start();

		virtual int pause();

		virtual int resume();

		virtual int stop();

		virtual const AVRational get_time_base();
		virtual int64_t get_start_time();

	private:
		// Decode one packet (NULL flushes) and fire the data callback.
		int decode(AVFrame *frame, AVPacket *packet);
		// Capture thread body.
		void record_loop();
		// Release all ffmpeg objects and reset to the pre-init state.
		void cleanup();
	private:
		AVFormatContext *_fmt_ctx;   // owned; opened on the dshow device
		AVInputFormat *_input_fmt;   // not owned (ffmpeg static)
		AVCodecContext *_codec_ctx;  // borrowed from the stream; closed in cleanup()
		AVCodec *_codec;             // not owned

		int _stream_index;           // index of the audio stream, -1 if none
	};
+
+}

+ 43 - 0
libs/Recorder/record_audio_factory.cpp

@@ -0,0 +1,43 @@
+#include "record_audio_factory.h"
+#include "record_audio_wasapi.h"
+#include "record_audio_dshow.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+
+int record_audio_new(RECORD_AUDIO_TYPES type,am::record_audio **recorder)
+{
+	int err = AE_NO;
+
+	switch (type)
+	{
+	case AT_AUDIO_WAVE:
+		err = AE_UNSUPPORT;
+		break;
+	case AT_AUDIO_WAS:
+		*recorder = (am::record_audio *)new am::record_audio_wasapi();
+		break;
+	case AT_AUDIO_DSHOW:
+		*recorder = (am::record_audio *)new am::record_audio_dshow();
+		break;
+	case AT_AUDIO_FFMPEG:
+		err = AE_UNSUPPORT;
+		break;
+	default:
+		err = AE_UNSUPPORT;
+		break;
+	}
+
+	return err;
+}
+
+void record_audio_destroy(am::record_audio ** recorder)
+{
+	if (*recorder != nullptr) {
+		(*recorder)->stop();
+		delete *recorder;
+	}
+
+	*recorder = nullptr;
+}

+ 19 - 0
libs/Recorder/record_audio_factory.h

@@ -0,0 +1,19 @@
#ifndef RECORD_AUDIO_FACTORY
#define RECORD_AUDIO_FACTORY

#include "record_audio.h"

/**
*  Create a new audio record context.
*
*  @param type     capture backend (only AT_AUDIO_WAS and AT_AUDIO_DSHOW are
*                  currently implemented)
*  @param recorder receives the new instance on success
*  @return AE_NO on success, AE_UNSUPPORT for unimplemented backends
*/
int record_audio_new(RECORD_AUDIO_TYPES type, am::record_audio **recorder);

/**
* Destroy an audio record context: stops it, deletes it and resets
* *recorder to nullptr.
*/
void record_audio_destroy(am::record_audio ** recorder);

#endif // !RECORD_AUDIO_FACTORY
+

+ 615 - 0
libs/Recorder/record_audio_wasapi.cpp

@@ -0,0 +1,615 @@
+#include "record_audio_wasapi.h"
+
+#include <string>
+
+#include "error_define.h"
+#include "system_error.h"
+#include "utils_string.h"
+
+#include "log_helper.h"
+
+#ifdef _WIN32
+
+#define NS_PER_SEC 1000000000
+#define REFTIMES_PER_SEC  NS_PER_SEC/100            //100ns per buffer unit
+
+#endif // _WIN32
+
+
+namespace am {
+
	record_audio_wasapi::record_audio_wasapi()
	{
		_co_inited = false;

		// NOTE(review): COM reports failure through the HRESULT alone;
		// consulting GetLastError() here is unusual — confirm it is intended.
		HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
		DWORD err = GetLastError();
		if (hr != S_OK && err != S_OK)
			al_error("%s,error:%s",err2str(AE_CO_INITED_FAILED), system_error::error2str(err).c_str());

		_co_inited = (hr == S_OK || hr == S_FALSE);//if already initialize will return S_FALSE

		_is_default = false;

		// COM interfaces and events are only acquired in init(); start them
		// null so clean_wasapi() can release unconditionally.
		_wfex = NULL;
		_enumerator = nullptr;
		_device = nullptr;
		_capture_client = nullptr;
		_capture = nullptr;
		_render = nullptr;
		_render_client = nullptr;

		_capture_sample_count = 0;
		_render_sample_count = 0;

		_ready_event = NULL;
		_stop_event = NULL;
		_render_event = NULL;

		_use_device_ts = false;

		_start_time = 0;
	}
+
	record_audio_wasapi::~record_audio_wasapi()
	{
		// Stop both threads before releasing COM objects and events.
		stop();

		clean_wasapi();

		// clean_wasapi() already calls CoUninitialize() and clears
		// _co_inited, so this only fires if clean-up was somehow skipped.
		if(_co_inited == true)
			CoUninitialize();
	}
+
+	void get_device_info(IMMDevice *device) {
+		HRESULT resSample;
+		IPropertyStore *store = nullptr;
+		PWAVEFORMATEX deviceFormatProperties;
+		PROPVARIANT prop;
+		resSample = device->OpenPropertyStore(STGM_READ, &store);
+		if (!FAILED(resSample)) {
+			resSample =
+				store->GetValue(PKEY_AudioEngine_DeviceFormat, &prop);
+			if (!FAILED(resSample)) {
+				deviceFormatProperties =
+					(PWAVEFORMATEX)prop.blob.pBlobData;
+				std::string device_sample = std::to_string(
+					deviceFormatProperties->nSamplesPerSec);
+			}
+		}
+	}
+
+#define KSAUDIO_SPEAKER_2POINT1     (KSAUDIO_SPEAKER_STEREO|SPEAKER_LOW_FREQUENCY)
+
+#define OBS_KSAUDIO_SPEAKER_4POINT1 \
+	(KSAUDIO_SPEAKER_SURROUND | SPEAKER_LOW_FREQUENCY)
+
	// Map a KSAUDIO speaker mask to the matching ffmpeg channel layout.
	// Masks without an explicit mapping fall back to ffmpeg's default
	// layout for the given channel count.
	int64_t record_audio_wasapi::convert_layout(DWORD layout, WORD channels)
	{
		switch (layout) {
		case KSAUDIO_SPEAKER_2POINT1:
			return AV_CH_LAYOUT_SURROUND;
		case KSAUDIO_SPEAKER_SURROUND:
			return AV_CH_LAYOUT_4POINT0;
		case OBS_KSAUDIO_SPEAKER_4POINT1:
			return AV_CH_LAYOUT_4POINT1;
		case KSAUDIO_SPEAKER_5POINT1_SURROUND:
			return AV_CH_LAYOUT_5POINT1_BACK;
		case KSAUDIO_SPEAKER_7POINT1_SURROUND:
			return AV_CH_LAYOUT_7POINT1;
		}

		return av_get_default_channel_layout(channels);
	}
+
+	void record_audio_wasapi::init_format(WAVEFORMATEX * wfex)
+	{
+		DWORD layout = 0;
+
+		if (wfex->wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
+			WAVEFORMATEXTENSIBLE *ext = (WAVEFORMATEXTENSIBLE *)wfex;
+			layout = ext->dwChannelMask;
+		}
+
+		_channel_layout = convert_layout(layout, wfex->nChannels);
+		_sample_rate = wfex->nSamplesPerSec;
+		_bit_rate = wfex->nAvgBytesPerSec;
+		_bit_per_sample = wfex->wBitsPerSample;
+		_channel_num = wfex->nChannels;
+
+		//wasapi is always flt
+		_fmt = AV_SAMPLE_FMT_FLT;
+	}
+
	// Set up a silent render stream on the loopback device. Without
	// something rendering, a loopback capture stream never signals its
	// event, so we keep the endpoint busy with silence (see render_loop()).
	int record_audio_wasapi::init_render()
	{
		int error = AE_NO;
		HRESULT res = S_OK;

		do {
			res = _device->Activate(__uuidof(IAudioClient),
				CLSCTX_ALL, 
				nullptr,
				(void **)&_render_client
			);

			if (FAILED(res)) {
				error = AE_CO_ACTIVE_DEVICE_FAILED;
				break;
			}

			WAVEFORMATEX *wfex;
			res = _render_client->GetMixFormat(&wfex);
			if (FAILED(res)) {
				error = AE_CO_GET_FORMAT_FAILED;
				break;
			}

			// Event-driven shared-mode stream, one-second buffer
			// (REFTIMES_PER_SEC is in 100ns units).
			res = _render_client->Initialize(AUDCLNT_SHAREMODE_SHARED,
				AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
				REFTIMES_PER_SEC,
				0, wfex, nullptr);

			CoTaskMemFree(wfex);
			if (FAILED(res)) {
				error = AE_CO_AUDIOCLIENT_INIT_FAILED;
				break;
			}

			/* Silent loopback fix. Prevents audio stream from stopping and */
			/* messing up timestamps and other weird glitches during silence */
			/* by playing a silent sample all over again. */

			res = _render_client->GetService(__uuidof(IAudioRenderClient),
				(void **)&_render);
			if (FAILED(res)) {
				error = AE_CO_GET_CAPTURE_FAILED;
				break;
			}

			// Auto-reset event the engine signals when it wants more data.
			_render_event = CreateEvent(NULL, FALSE, FALSE, NULL);
			if (!_render_event) {
				error = AE_CO_CREATE_EVENT_FAILED;
				break;
			}
			
			res = _render_client->SetEventHandle(_render_event);
			if (FAILED(res)) {
				error = AE_CO_SET_EVENT_FAILED;
				break;
			}

			//pre fill a single silent buffer
			res = _render_client->GetBufferSize(&_render_sample_count);
			if (FAILED(res)) {
				error = AE_CO_GET_VALUE_FAILED;
				break;
			}

			uint8_t *buffer = NULL;
			res = _render->GetBuffer(_render_sample_count, &buffer);
			if (FAILED(res)) {
				error = AE_CO_GET_VALUE_FAILED;
				break;
			}

			// AUDCLNT_BUFFERFLAGS_SILENT: engine treats the buffer as
			// silence, no need to zero it ourselves.
			res = _render->ReleaseBuffer(_render_sample_count, AUDCLNT_BUFFERFLAGS_SILENT);
			if (FAILED(res)) {
				error = AE_CO_RELEASE_BUFFER_FAILED;
				break;
			}
		} while (0);

		if (error != AE_NO)
			al_error("init render failed(%ld), %s,lasterror:%lu", res, err2str(error), GetLastError());

		return error;
	}
+
	// Bind to the requested endpoint (or the system default), create the
	// capture stream in event-driven shared mode — with loopback for output
	// devices — and prepare the ready/stop events. On any failure all
	// partially acquired COM state is released via clean_wasapi().
	int record_audio_wasapi::init(const std::string & device_name, const std::string &device_id, bool is_input)
	{
		int error = AE_NO;
		HRESULT hr = S_OK;

		al_info("wasapi start to initialize in %s mode with: %s",
			is_input ? "input" : "output", 
			device_name.c_str());

		if (_co_inited == false) {
			return AE_CO_INITED_FAILED;
		}

		// Repeated init() calls after a successful one are no-ops.
		if (_inited == true) {
			return AE_NO;
		}

		_device_name = device_name;
		_device_id = device_id;
		_is_input = is_input;
		_is_default = (utils_string::ascii_utf8(DEFAULT_AUDIO_INOUTPUT_ID).compare(_device_id) == 0);

		do {
			hr = CoCreateInstance(
				__uuidof(MMDeviceEnumerator),
				NULL,
				CLSCTX_ALL,
				__uuidof(IMMDeviceEnumerator),
				(void **)&_enumerator);

			if (FAILED(hr)) {
				error = AE_CO_CREATE_FAILED;
				break;
			}

			// Default device: ask the enumerator; otherwise look it up by id.
			if (_is_default) {
				hr = _enumerator->GetDefaultAudioEndpoint(
					is_input ? eCapture : eRender,
					is_input ? eCommunications : eConsole, &_device);
			}
			else {
				hr = _enumerator->GetDevice(utils_string::utf8_unicode(_device_id).c_str(), &_device);
			}

			if (hr != S_OK) {
				error = AE_CO_GETENDPOINT_FAILED;
				break;
			}

			get_device_info(_device);

			hr = _device->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**)&_capture_client);
			if (hr != S_OK) {
				error = AE_CO_ACTIVE_DEVICE_FAILED;
				break;
			}
			
			hr = _capture_client->GetMixFormat(&_wfex);
			if (hr != S_OK) {
				error = AE_CO_GET_FORMAT_FAILED;
				break;
			}

			// Publish sample rate / channels / layout to the base class.
			init_format(_wfex);

			// Output devices are captured via loopback on the render endpoint.
			DWORD flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
			if (_is_input == false)
				flags |= AUDCLNT_STREAMFLAGS_LOOPBACK;

			hr = _capture_client->Initialize(
				AUDCLNT_SHAREMODE_SHARED,
				flags,
				REFTIMES_PER_SEC,
				0,
				_wfex,
				NULL);

			// AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED
			// https://docs.microsoft.com/en-us/windows/win32/api/audioclient/nf-audioclient-iaudioclient-initialize
			if (hr != S_OK) {
				error = AE_CO_AUDIOCLIENT_INIT_FAILED;
				break;
			}

			//For output mode,ready event will not signal when there is nothing rendering
			//We run a render thread and render silent pcm data all time
			if (!_is_input) {
				error = init_render();
				if (error != AE_NO)
					break;
			}

			hr = _capture_client->GetBufferSize(&_capture_sample_count);
			if (hr != S_OK) {
				error = AE_CO_GET_VALUE_FAILED;
				break;
			}


			hr = _capture_client->GetService(__uuidof(IAudioCaptureClient), (void**)&_capture);
			if (hr != S_OK) {
				error = AE_CO_GET_CAPTURE_FAILED;
				break;
			}

			// Auto-reset: signaled when capture data is available.
			_ready_event = CreateEvent(NULL, FALSE, FALSE, NULL);
			if (!_ready_event) {
				error = AE_CO_CREATE_EVENT_FAILED;
				break;
			}

			// Manual-reset: stays signaled once stop() fires it, so both
			// worker threads see it.
			_stop_event = CreateEvent(NULL, TRUE, FALSE, NULL);
			if (!_stop_event) {
				error = AE_CO_CREATE_EVENT_FAILED;
				break;
			}

			hr = _capture_client->SetEventHandle(_ready_event);
			if (hr != S_OK) {
				error = AE_CO_SET_EVENT_FAILED;
				break;
			}

			_inited = true;

		} while (0);

		if (error != AE_NO) {
			al_error("wasapi initialize failed,%s,error:%lu,hr:%lld",
				err2str(error), GetLastError(), hr);
			clean_wasapi();
		}

		return error;
	}
+
	// Start the capture stream (plus the silent render stream in output
	// mode) and spawn the worker threads. Safe to call twice.
	int record_audio_wasapi::start()
	{
		if (_running == true) {
			al_warn("audio record is already running");
			return AE_NO;
		}

		if (_inited == false)
			return AE_NEED_INIT;

		HRESULT hr = S_OK;
		
		// Output mode: the silent render stream must run or the loopback
		// capture event never fires (see init_render()).
		if (!_is_input) {
			hr = _render_client->Start();
			if (FAILED(hr)) {
				al_error("%s,error:%lu", err2str(AE_CO_START_FAILED), GetLastError());
				return AE_CO_START_FAILED;
			}
		}
		
		
		hr = _capture_client->Start();
		if (hr != S_OK) {
			al_error("%s,error:%lu", err2str(AE_CO_START_FAILED), GetLastError());
			return AE_CO_START_FAILED;
		}

		// Reference clock for pts when device timestamps are not used.
		_start_time = av_gettime_relative();

		_running = true;

		
		if(!_is_input) {
			_render_thread = std::thread(std::bind(&record_audio_wasapi::render_loop, this));
		}
		

		_thread = std::thread(std::bind(&record_audio_wasapi::record_loop, this));

		return AE_NO;
	}
+
	int record_audio_wasapi::pause()
	{
		// NOTE(review): _paused is set here but never read by record_loop()
		// or do_record(), so pausing does not actually suspend callbacks.
		_paused = true;
		return AE_NO;
	}
+
	int record_audio_wasapi::resume()
	{
		// See pause(): the flag is currently not consulted by the capture path.
		_paused = false;
		return AE_NO;
	}
+
+	int record_audio_wasapi::stop()
+	{
+		_running = false;
+		SetEvent(_stop_event);
+
+		if (_render_thread.joinable())
+			_render_thread.join();
+
+		if (_thread.joinable())
+			_thread.join();
+
+		if (_capture_client)
+			_capture_client->Stop();
+
+		if (_render_client)
+			_render_client->Stop();
+
+		return AE_NO;
+	}
+
+	const AVRational record_audio_wasapi::get_time_base()
+	{
+		if (_use_device_ts)
+			return{ 1,NS_PER_SEC };
+		else return{ 1,AV_TIME_BASE };
+	}
+
	int64_t record_audio_wasapi::get_start_time()
	{
		// Captured from av_gettime_relative() in start(); 0 until started.
		return _start_time;
	}
+
+	void record_audio_wasapi::process_data(AVFrame *frame, uint8_t* data, uint32_t sample_count, uint64_t device_ts)
+	{
+		int sample_size = _bit_per_sample / 8 * _channel_num;
+
+		//wasapi time unit is 100ns,so time base is NS_PER_SEC
+		frame->pts = _use_device_ts ? device_ts * 100 : av_gettime_relative();
+
+		if (_use_device_ts == false) {
+			frame->pts -= (int64_t)sample_count * NS_PER_SEC / (int64_t)_sample_rate / 100;
+		}
+
+		frame->pkt_dts = frame->pts;
+		frame->nb_samples = sample_count;
+		frame->format = _fmt;
+		frame->sample_rate = _sample_rate;
+		frame->channels = _channel_num;
+		frame->pkt_size = sample_count*sample_size;
+
+		av_samples_fill_arrays(frame->data, frame->linesize, data, _channel_num, sample_count, _fmt, 1);
+
+		if (_on_data) _on_data(frame, _cb_extra_index);
+	}
+
	// Drain every packet currently queued in the capture client and forward
	// each one through process_data(). Returns AE_NO when the queue is empty,
	// or an AE_CO_* error on a failed WASAPI call.
	int record_audio_wasapi::do_record(AVFrame *frame)
	{
		HRESULT res = S_OK;
		LPBYTE buffer = NULL;
		DWORD flags = 0;
		uint32_t sample_count = 0;
		UINT64 pos, ts;
		int error = AE_NO;

		while (_running) {
			res = _capture->GetNextPacketSize(&sample_count);

			if (FAILED(res)) {
				// Device invalidation (unplug) is expected; do not spam logs.
				if (res != AUDCLNT_E_DEVICE_INVALIDATED)
					al_error("GetNextPacketSize failed: %lX", res);
				error = AE_CO_GET_PACKET_FAILED;
				break;
			}

			// Queue drained: wait for the next ready event.
			if (!sample_count)
				break;

			buffer = NULL;
			res = _capture->GetBuffer(&buffer, &sample_count, &flags, &pos, &ts);
			if (FAILED(res)) {
				if (res != AUDCLNT_E_DEVICE_INVALIDATED)
					al_error("GetBuffer failed: %lX",res);
				error = AE_CO_GET_BUFFER_FAILED;
				break;
			}

			//input mode do not have silent data flag do nothing here
			if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
				//al_warn("on slient data %d", sample_count);
			}



			if (buffer) {
				process_data(frame, buffer, sample_count, ts);
			}
			else {
				al_error("buffer invalid is");
			}

			_capture->ReleaseBuffer(sample_count);
		}

		return error;
	}
+
	// Render-thread body for output (loopback) mode: keep feeding silence to
	// the endpoint so the capture ready event keeps firing. Exits when the
	// stop event is signaled or any render call fails.
	void record_audio_wasapi::render_loop()
	{
		HANDLE events[2] = { _stop_event,_render_event };

		HRESULT res = S_OK;
		uint8_t *pData = NULL;
		uint32_t padding_count = 0;

		// WAIT_OBJECT_0 means the stop event fired; anything else here is
		// the render event asking for more data.
		while (_running && 
			WaitForMultipleObjects(2, events, FALSE, INFINITE) != WAIT_OBJECT_0
			) {
			
			res = _render_client->GetCurrentPadding(&padding_count);
			if (FAILED(res)) {
				break;
			}

			// Buffer already full: nothing to write; treated as an error
			// because the event should not fire in that state.
			if (padding_count == _render_sample_count) {
				if (_on_error) _on_error(AE_CO_PADDING_UNEXPECTED, _cb_extra_index);
				break;
			}

			res = _render->GetBuffer(_render_sample_count - padding_count, &pData);
			if (FAILED(res)) {
				if (_on_error) _on_error(AE_CO_GET_BUFFER_FAILED, _cb_extra_index);
				break;
			}

			// Release as silence; the engine ignores the buffer contents.
			res = _render->ReleaseBuffer(_render_sample_count - padding_count, AUDCLNT_BUFFERFLAGS_SILENT);
			if (FAILED(res)) {
				if (_on_error) _on_error(AE_CO_RELEASE_BUFFER_FAILED, _cb_extra_index);
				break;
			}
		}
	}
+
	// Capture-thread body: wait for the ready event (or poll every 10ms in
	// output mode) and drain the capture queue via do_record().
	void record_audio_wasapi::record_loop()
	{
		AVFrame *frame = av_frame_alloc();

		HANDLE events[2] = { _stop_event,_ready_event };

		// On Windows 7 the ready event is only signaled while something is
		// actually rendering, so for speakers (output mode) fall back to a
		// 10ms polling timeout instead of waiting forever.
		DWORD dur = _is_input ? INFINITE : 10;

		int error = AE_NO;
		while (_running)
		{
			// WAIT_OBJECT_0 == stop event; timeout or ready event both fall
			// through to a drain attempt.
			if (WaitForMultipleObjects(2, events, FALSE, dur) == WAIT_OBJECT_0)
				break;

			if ((error = do_record(frame)) != AE_NO) {
				if (_on_error) _on_error(error, _cb_extra_index);
				break;
			}
		}//while(_running)

		av_frame_free(&frame);
	}
+
	// Release every COM interface, buffer and event acquired by init()/
	// init_render() and drop the COM apartment. Safe to call repeatedly;
	// every pointer is nulled after release.
	void record_audio_wasapi::clean_wasapi()
	{
		if (_wfex)
			CoTaskMemFree(_wfex);
		_wfex = NULL;

		if (_enumerator)
			_enumerator->Release();
		_enumerator = nullptr;

		if (_device)
			_device->Release();
		_device = nullptr;

		if (_capture_client)
			_capture_client->Release();
		_capture_client = nullptr;

		if (_render_client)
			_render_client->Release();
		_render_client = nullptr;

		if (_capture)
			_capture->Release();
		_capture = nullptr;

		if (_render)
			_render->Release();
		_render = nullptr;

		if (_ready_event)
			CloseHandle(_ready_event);
		_ready_event = NULL;

		if (_stop_event)
			CloseHandle(_stop_event);
		_stop_event = NULL;

		if (_render_event)
			CloseHandle(_render_event);
		_render_event = NULL;

		// NOTE(review): uninitializing COM here means a re-init() after
		// clean-up will fail the _co_inited check — confirm this one-shot
		// lifecycle is intended.
		if(_co_inited == true)
			CoUninitialize();

		_co_inited = false;
		_inited = false;
	}
+}

+ 88 - 0
libs/Recorder/record_audio_wasapi.h

@@ -0,0 +1,88 @@
+#ifndef RECORD_AUDIO_WASAPI
+#define RECORD_AUDIO_WASAPI
+
+#include "record_audio.h"
+
+#ifdef _WIN32
+
+#include "headers_mmdevice.h"
+
+#endif // _WIN32
+
+namespace am {
	// WASAPI (Core Audio) capture backend. Captures microphones directly and
	// speakers via loopback; loopback mode additionally runs a silent render
	// stream so the capture event keeps firing during silence.
	class record_audio_wasapi:public record_audio
	{
	public:
		record_audio_wasapi();
		~record_audio_wasapi();

		virtual int init(const std::string &device_name,
			const std::string &device_id,
			bool is_input);

		virtual int start();

		virtual int pause();

		virtual int resume();

		virtual int stop();

		virtual const AVRational get_time_base();

		virtual int64_t get_start_time();

	private:
		// Map a KSAUDIO speaker mask to an ffmpeg channel layout.
		int64_t convert_layout(DWORD layout, WORD channels);

		// Publish the mix format's parameters to the base-class fields.
		void init_format(WAVEFORMATEX *wfex);

		// Set up the silent render stream used in loopback mode.
		int init_render();

		// Render-thread body: keeps feeding silence to the endpoint.
		void render_loop();

		// Wrap one captured buffer in an AVFrame and invoke the callback.
		void process_data(AVFrame *frame, uint8_t* data, uint32_t sample_count, uint64_t device_ts);

		// Drain all currently queued capture packets.
		int do_record(AVFrame *frame);

		// Capture-thread body.
		void record_loop();

		// Release all COM objects/events and reset to the pre-init state.
		void clean_wasapi();

	private:
		WAVEFORMATEX *_wfex;                 // mix format (CoTaskMemFree'd)

		IMMDeviceEnumerator *_enumerator;

		IMMDevice *_device;

		IAudioClient *_capture_client;       // capture stream

		IAudioCaptureClient *_capture;

		IAudioRenderClient *_render;         // silent render, loopback only

		IAudioClient *_render_client;

		std::thread _render_thread;

		uint32_t _capture_sample_count;      // capture buffer size in frames
		uint32_t _render_sample_count;       // render buffer size in frames

		HANDLE _ready_event;                 // auto-reset: capture data ready
		HANDLE _stop_event;                  // manual-reset: stop requested
		HANDLE _render_event;                // auto-reset: render wants data

		bool _co_inited;                     // COM apartment owned by us

		bool _is_default;                    // default endpoint requested

		bool _use_device_ts;                 // stamp frames with device clock

		//define time stamps here
		int64_t _start_time;                 // av_gettime_relative() at start()
	};
+}
+
+#endif // !RECORD_AUDIO_WASAPI
+

+ 23 - 0
libs/Recorder/record_desktop.cpp

@@ -0,0 +1,23 @@
+#include "record_desktop.h"
+
am::record_desktop::record_desktop()
{
	// A freshly constructed recorder is idle and uninitialized.
	_running = false;
	_paused = false;
	_inited = false;

	_on_data = nullptr;
	_on_error = nullptr;

	_device_name = "";
	_data_type = RECORD_DESKTOP_DATA_TYPES::AT_DESKTOP_BGRA;

	// Default to ffmpeg's microsecond clock; subclasses may override in init().
	_time_base = { 1,AV_TIME_BASE };
	_start_time = 0;
	_pixel_fmt = AV_PIX_FMT_NONE;
}
+
am::record_desktop::~record_desktop()
{
	// Nothing to release at the base level; subclasses own their resources.
}

+ 80 - 0
libs/Recorder/record_desktop.h

@@ -0,0 +1,80 @@
+#ifndef RECORD_DESKTOP
+#define RECORD_DESKTOP
+
+#include "record_desktop_define.h"
+
+#include "headers_ffmpeg.h"
+
+#include <atomic>
+#include <thread>
+#include <functional>
+#include <string>
+
+namespace am {
+	typedef std::function<void(AVFrame *frame)> cb_desktop_data;
+	typedef std::function<void(int)> cb_desktop_error;
+
	// Abstract base for desktop capture backends (GDI, duplication, ...).
	// Subclasses implement init/start/pause/resume/stop and deliver frames
	// through the registered data callback.
	class record_desktop
	{
	public:
		record_desktop();
		virtual ~record_desktop();

		virtual int init(
			const RECORD_DESKTOP_RECT &rect,
			const int fps
		) = 0;

		virtual int start() = 0;
		virtual int pause() = 0;
		virtual int resume() = 0;
		virtual int stop() = 0;

		inline const AVRational & get_time_base() { return _time_base; }

		inline int64_t get_start_time() { return _start_time; }

		inline AVPixelFormat get_pixel_fmt() { return _pixel_fmt; }

	public:
		inline bool is_recording() { return _running; }
		inline const std::string & get_device_name() { return _device_name; }
		inline const RECORD_DESKTOP_DATA_TYPES get_data_type() { return _data_type; }
		// Register frame/error callbacks; must be done before start().
		inline void registe_cb(
			cb_desktop_data on_data,
			cb_desktop_error on_error) {
			_on_data = on_data;
			_on_error = on_error;
		}
		inline const RECORD_DESKTOP_RECT & get_rect() { return _rect; }

		inline const int get_frame_rate() { return _fps; }

	protected:
		// Release subclass-owned resources; called from subclass teardown.
		virtual void clean_up() = 0;

	protected:
		std::atomic_bool _running;   // capture thread active
		std::atomic_bool _paused;
		std::atomic_bool _inited;

		std::thread _thread;

		std::string _device_name;

		RECORD_DESKTOP_RECT _rect;   // desktop region being captured
		RECORD_DESKTOP_DATA_TYPES _data_type;

		int _fps;

		cb_desktop_data _on_data;
		cb_desktop_error _on_error;

		AVRational _time_base;       // units of frame pts values
		int64_t _start_time;
		AVPixelFormat _pixel_fmt;    // format of delivered frames
	};
+}
+
+
+#endif

+ 45 - 0
libs/Recorder/record_desktop_define.h

@@ -0,0 +1,45 @@
+#ifndef RECORD_DESKTOP_DEFINE
+#define RECORD_DESKTOP_DEFINE
+
/*
* Record type
*
*/
typedef enum {
	DT_DESKTOP_NO = 0,           ///< no/unspecified capture backend
	DT_DESKTOP_FFMPEG_GDI,       ///< ffmpeg gdigrab
	DT_DESKTOP_FFMPEG_DSHOW,     ///< ffmpeg dshow screen capture
	DT_DESKTOP_WIN_GDI,          ///< native GDI BitBlt
	DT_DESKTOP_WIN_DUPLICATION,  ///< DXGI desktop duplication
  DT_DESKTOP_WIN_WGC,          ///< Windows.Graphics.Capture
	DT_DESKTOP_WIN_MAG           ///< magnification api
}RECORD_DESKTOP_TYPES;
+
+/*
+* Record desktop data type
+*
+*/
+
typedef enum {
	AT_DESKTOP_NO = 0,  ///< no/unspecified pixel layout
	AT_DESKTOP_RGBA,
	AT_DESKTOP_BGRA
}RECORD_DESKTOP_DATA_TYPES;
+
+/**
+* Record desktop rect
+*
+*/
+
// Capture region in desktop (virtual-screen) coordinates; edges are
// inclusive-left/top, exclusive-right/bottom as with Win32 RECT usage here.
typedef struct {
	int left;
	int top;
	int right;
	int bottom;
}RECORD_DESKTOP_RECT;
+
+
+
+
+
+#endif

+ 893 - 0
libs/Recorder/record_desktop_duplication.cpp

@@ -0,0 +1,893 @@
+#include "record_desktop_duplication.h"
+
+#include "system_lib.h"
+#include "d3d_helper.h"
+#include "d3d_pixelshader.h"
+#include "d3d_vertexshader.h"
+
+#include "utils_string.h"
+
+#include "system_error.h"
+#include "error_define.h"
+#include "log_helper.h"
+
+
+namespace am {
+
	record_desktop_duplication::record_desktop_duplication()
	{
		// Duplication frames arrive as BGRA.
		_data_type = RECORD_DESKTOP_DATA_TYPES::AT_DESKTOP_BGRA;
		_buffer = NULL;
		_buffer_size = 0;

		// Library handles and D3D/DXGI interfaces are acquired in init().
		_d3d11 = nullptr;
		_dxgi = nullptr;
		
		_d3d_device = nullptr;
		_d3d_ctx = nullptr;
		_d3d_vshader = nullptr;
		_d3d_pshader = nullptr;
		_d3d_inlayout = nullptr;
		_d3d_samplerlinear = nullptr;

		_duplication = nullptr;
		_image = nullptr;
		_output_des = { 0 };
		_output_index = 0;

		ZeroMemory(&_cursor_info, sizeof(_cursor_info));
	}
+
+
	record_desktop_duplication::~record_desktop_duplication()
	{
		// Stop the capture thread before releasing D3D/DXGI resources.
		stop();
		clean_up();
	}
+
	// Load d3d11/dxgi, create the device, allocate the frame buffer and set
	// up the duplication interface for the requested desktop rect.
	int record_desktop_duplication::init(const RECORD_DESKTOP_RECT & rect, const int fps)
	{
		int error = AE_NO;
		if (_inited == true) {
			return error;
		}

		_fps = fps;
		_rect = rect;

		do {
			_d3d11 = load_system_library("d3d11.dll");
			_dxgi = load_system_library("dxgi.dll");

			if (!_d3d11 || !_dxgi) {
				error = AE_D3D_LOAD_FAILED;
				break;
			}

			error = init_d3d11();
			if (error != AE_NO)
				break;

			_width = rect.right - rect.left;
			_height = rect.bottom - rect.top;
			// NOTE(review): for 32bpp BGRA this reduces to width*height*4;
			// the (x*32+31)/32 rounding term mirrors a bitmap stride
			// calculation — confirm it is intentional here.
			_buffer_size = (_width * 32 + 31) / 32 * _height * 4;
			_buffer = new uint8_t[_buffer_size];

			_start_time = av_gettime_relative();
			_time_base = { 1,AV_TIME_BASE };
			_pixel_fmt = AV_PIX_FMT_BGRA;

			error = init_duplication();
			if (error != AE_NO) {
				break;
			}

			_inited = true;
		} while (0);

		if (error != AE_NO) {
			al_debug("%s,last error:%s", err2str(error), system_error::error2str(GetLastError()).c_str());
		}

		return error;
	}
+
+	int record_desktop_duplication::start()
+	{
+		if (_running == true) {
+			al_warn("record desktop duplication is already running");
+			return AE_NO;
+		}
+
+		if (_inited == false) {
+			return AE_NEED_INIT;
+		}
+
+		_running = true;
+		_thread = std::thread(std::bind(&record_desktop_duplication::record_func, this));
+
+		return AE_NO;
+	}
+
	int record_desktop_duplication::pause()
	{
		// Sets the flag only; whether record_func() honors it is not visible
		// here — verify against the capture loop.
		_paused = true;
		return AE_NO;
	}
+
	int record_desktop_duplication::resume()
	{
		// Counterpart of pause(); clears the flag.
		_paused = false;
		return AE_NO;
	}
+
+	int record_desktop_duplication::stop()
+	{
+		_running = false;
+		if (_thread.joinable())
+			_thread.join();
+
+		return AE_NO;
+	}
+
	// Tear everything down in reverse acquisition order: frame/cursor
	// buffers, duplication interfaces, D3D interfaces, then the libraries.
	void record_desktop_duplication::clean_up()
	{
		_inited = false;

		if (_buffer)
			delete[] _buffer;

		_buffer = nullptr;

		if (_cursor_info.buff) {
			delete[] _cursor_info.buff;
			_cursor_info.buff = nullptr;
		}

		ZeroMemory(&_cursor_info, sizeof(_cursor_info));

		//Clean up duplication interfaces
		clean_duplication();

		//Clean up d3d11 interfaces
		clean_d3d11();

		//finally free d3d11 & dxgi library
		if (_d3d11) free_system_library(_d3d11);
		_d3d11 = nullptr;

		if (_dxgi) free_system_library(_dxgi);
		_dxgi = nullptr;
	}
+
+	int record_desktop_duplication::get_dst_adapter(IDXGIAdapter ** adapter)
+	{
+		int error = AE_NO;
+		do {
+			auto adapters = d3d_helper::get_adapters(&error, true);
+			if (error != AE_NO || adapters.size() == 0)
+				break;
+
+			for (std::list<IDXGIAdapter *>::iterator itr = adapters.begin(); itr != adapters.end(); itr++) {
+				IDXGIOutput *adapter_output = nullptr;
+				DXGI_ADAPTER_DESC adapter_desc = { 0 };
+				DXGI_OUTPUT_DESC adapter_output_desc = { 0 };
+				(*itr)->GetDesc(&adapter_desc);
+				al_debug("adaptor:%s", utils_string::unicode_ascii(adapter_desc.Description).c_str());
+
+				unsigned int n = 0;
+				RECT output_rect;
+				while ((*itr)->EnumOutputs(n, &adapter_output) != DXGI_ERROR_NOT_FOUND)
+				{
+					HRESULT hr = adapter_output->GetDesc(&adapter_output_desc);
+					if (FAILED(hr)) continue;
+
+					output_rect = adapter_output_desc.DesktopCoordinates;
+
+					al_debug("  output:%s left:%d top:%d right:%d bottom:%d",
+						utils_string::unicode_ascii(adapter_output_desc.DeviceName).c_str(),
+						output_rect.left, output_rect.top, output_rect.right, output_rect.bottom);
+
+					if (output_rect.left <= _rect.left &&
+						output_rect.top <= _rect.top &&
+						output_rect.right >= _rect.right &&
+						output_rect.bottom >= _rect.bottom) {
+						error = AE_NO;
+						break;
+					}
+
+					++n;
+				}
+
+				if (error != AE_DXGI_FOUND_ADAPTER_FAILED) {
+					*adapter = *itr;
+					break;
+				}
+			}
+
+		} while (0);
+
+		return error;
+	}
+
	// Create a D3D11 device (and _d3d_ctx) on the given adapter, trying
	// driver types and feature levels from most to least capable.
	int record_desktop_duplication::create_d3d_device(IDXGIAdapter *adapter, ID3D11Device ** device)
	{
		int error = AE_NO;
		do {
			PFN_D3D11_CREATE_DEVICE create_device =
				(PFN_D3D11_CREATE_DEVICE)GetProcAddress(_d3d11, "D3D11CreateDevice");
			if (!create_device) {
				error = AE_D3D_GET_PROC_FAILED;
				break;
			}

			HRESULT hr = S_OK;

			// Driver types supported
			// If you set the pAdapter parameter to a non - NULL value, 
			// you must also set the DriverType parameter to the D3D_DRIVER_TYPE_UNKNOWN value.
			D3D_DRIVER_TYPE driver_types[] =
			{
				D3D_DRIVER_TYPE_UNKNOWN,
				D3D_DRIVER_TYPE_HARDWARE,
				D3D_DRIVER_TYPE_WARP,
				D3D_DRIVER_TYPE_REFERENCE,
			};
			UINT n_driver_types = ARRAYSIZE(driver_types);

			// Feature levels supported
			D3D_FEATURE_LEVEL feature_levels[] =
			{
				D3D_FEATURE_LEVEL_11_0,
				D3D_FEATURE_LEVEL_10_1,
				D3D_FEATURE_LEVEL_10_0,
				D3D_FEATURE_LEVEL_9_1
			};
			UINT n_feature_levels = ARRAYSIZE(feature_levels);

			D3D_FEATURE_LEVEL feature_level;

			// Create device: first driver type that succeeds wins.
			for (UINT driver_index = 0; driver_index < n_driver_types; ++driver_index)
			{
				hr = create_device(adapter, driver_types[driver_index], nullptr, 0, feature_levels, n_feature_levels,
					D3D11_SDK_VERSION, device, &feature_level, &_d3d_ctx);
				if (SUCCEEDED(hr)) break;
			}

			if (FAILED(hr))
			{
				error = AE_D3D_CREATE_DEVICE_FAILED;
				break;
			}

		} while (0);

		return error;
	}
+
	// Initializes the D3D11 objects needed for duplication: picks the adapter
	// whose output covers the capture rect, then creates the device/context.
	// The shader/sampler setup below is compiled out (#if 0) because full-screen
	// grabbing does not need it; it would only be required for a move/dirty-rect
	// copy path.
	int record_desktop_duplication::init_d3d11()
	{
		int error = AE_NO;

		do {
			IDXGIAdapter *adapter = nullptr;
			error = get_dst_adapter(&adapter);
			if (error != AE_NO )
				break;
			
			error = create_d3d_device(adapter, &_d3d_device);
			if (error != AE_NO)
				break;
			//No need for grab full screen,but in move & dirty rects copy
#if 0
			// VERTEX shader
			UINT Size = ARRAYSIZE(g_VS);
			HRESULT hr = _d3d_device->CreateVertexShader(g_VS, Size, nullptr, &_d3d_vshader);
			if (FAILED(hr))
			{
				error = AE_D3D_CREATE_VERTEX_SHADER_FAILED;
				break;
			}

			// Input layout
			D3D11_INPUT_ELEMENT_DESC layouts[] =
			{
				{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
				{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 }
			};
			UINT n_layouts = ARRAYSIZE(layouts);
			hr = _d3d_device->CreateInputLayout(layouts, n_layouts, g_VS, Size, &_d3d_inlayout);
			if (FAILED(hr))
			{
				error = AE_D3D_CREATE_INLAYOUT_FAILED;
				break;
			}
			_d3d_ctx->IASetInputLayout(_d3d_inlayout);

			// Pixel shader
			Size = ARRAYSIZE(g_PS);
			hr = _d3d_device->CreatePixelShader(g_PS, Size, nullptr, &_d3d_pshader);
			if (FAILED(hr))
			{
				error = AE_D3D_CREATE_PIXEL_SHADER_FAILED;
				break;
			}

			// Set up sampler
			D3D11_SAMPLER_DESC sampler_desc;
			RtlZeroMemory(&sampler_desc, sizeof(sampler_desc));
			sampler_desc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
			sampler_desc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
			sampler_desc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
			sampler_desc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
			sampler_desc.ComparisonFunc = D3D11_COMPARISON_NEVER;
			sampler_desc.MinLOD = 0;
			sampler_desc.MaxLOD = D3D11_FLOAT32_MAX;
			hr = _d3d_device->CreateSamplerState(&sampler_desc, &_d3d_samplerlinear);
			if (FAILED(hr))
			{
				error = AE_D3D_CREATE_SAMPLERSTATE_FAILED;
				break;
			}
#endif
		} while (0);

		return error;
	}
+
+	void record_desktop_duplication::clean_d3d11()
+	{
+		if (_d3d_device) _d3d_device->Release();
+		_d3d_device = nullptr;
+
+		if (_d3d_ctx) _d3d_ctx->Release();
+		_d3d_ctx = nullptr;
+
+		if (_d3d_vshader) _d3d_vshader->Release();
+		_d3d_vshader = nullptr;
+
+		if (_d3d_pshader) _d3d_pshader->Release();
+		_d3d_pshader = nullptr;
+
+		if (_d3d_inlayout) _d3d_inlayout->Release();
+		_d3d_inlayout = nullptr;
+
+		if (_d3d_samplerlinear) _d3d_samplerlinear->Release();
+		_d3d_samplerlinear = nullptr;
+	}
+
+	int record_desktop_duplication::init_duplication()
+	{
+		int error = AE_NO;
+		do {
+			// Get DXGI device
+			IDXGIDevice* dxgi_device = nullptr;
+			HRESULT hr = _d3d_device->QueryInterface(__uuidof(IDXGIDevice), reinterpret_cast<void**>(&dxgi_device));
+			if (FAILED(hr))
+			{
+				error = AE_D3D_QUERYINTERFACE_FAILED;
+				break;
+			}
+
+			// Get DXGI adapter
+			IDXGIAdapter* dxgi_adapter = nullptr;
+			hr = dxgi_device->GetParent(__uuidof(IDXGIAdapter), reinterpret_cast<void**>(&dxgi_adapter));
+			dxgi_device->Release();
+			dxgi_device = nullptr;
+			if (FAILED(hr))
+			{
+				error = AE_DUP_GET_PARENT_FAILED;
+				break;
+			}
+
+			// Get output
+			IDXGIOutput* dxgi_output = nullptr;
+			hr = dxgi_adapter->EnumOutputs(_output_index, &dxgi_output);
+			dxgi_adapter->Release();
+			dxgi_adapter = nullptr;
+			if (FAILED(hr))
+			{
+				error = AE_DUP_ENUM_OUTPUT_FAILED;
+				break;
+			}
+
+			dxgi_output->GetDesc(&_output_des);
+
+			// QI for Output 1
+			IDXGIOutput1* dxgi_output1 = nullptr;
+			hr = dxgi_output->QueryInterface(__uuidof(dxgi_output1), reinterpret_cast<void**>(&dxgi_output1));
+			dxgi_output->Release();
+			dxgi_output = nullptr;
+			if (FAILED(hr))
+			{
+				error = AE_DUP_QI_FAILED;
+				break;
+			}
+
+			// Create desktop duplication
+			hr = dxgi_output1->DuplicateOutput(_d3d_device, &_duplication);
+			dxgi_output1->Release();
+			dxgi_output1 = nullptr;
+			if (FAILED(hr))
+			{
+				error = AE_DUP_DUPLICATE_FAILED;
+				if (hr == DXGI_ERROR_NOT_CURRENTLY_AVAILABLE)
+				{
+					error = AE_DUP_DUPLICATE_MAX_FAILED;
+				}
+
+				al_error("duplicate output failed,%lld", hr);
+				break;
+			}
+		} while (0);
+
+		return error;
+	}
+
+	int record_desktop_duplication::free_duplicated_frame()
+	{
+		HRESULT hr = _duplication->ReleaseFrame();
+		if (FAILED(hr))
+		{
+			return AE_DUP_RELEASE_FRAME_FAILED;
+		}
+
+		if (_image)
+		{
+			_image->Release();
+			_image = nullptr;
+		}
+
+		return AE_DUP_RELEASE_FRAME_FAILED;
+	}
+
+	void record_desktop_duplication::clean_duplication()
+	{
+		if (_duplication) _duplication->Release();
+		if (_image) _image->Release();
+
+		_duplication = nullptr;
+		_image = nullptr;
+	}
+
+	bool record_desktop_duplication::attatch_desktop()
+	{
+		HDESK desktop = nullptr;
+		desktop = OpenInputDesktop(0, FALSE, GENERIC_ALL);
+		if (!desktop)
+		{
+			// We do not have access to the desktop so request a retry
+			return false;
+		}
+
+		// Attach desktop to this thread
+		bool battached = SetThreadDesktop(desktop) != 0;
+		CloseDesktop(desktop);
+
+		if (!battached)
+		{
+			// We do not have access to the desktop so request a retry
+			return false;
+		}
+
+		return true;
+	}
+
+	int record_desktop_duplication::get_desktop_image(DXGI_OUTDUPL_FRAME_INFO *frame_info)
+	{
+		IDXGIResource* dxgi_res = nullptr;
+
+		// Get new frame
+		HRESULT hr = _duplication->AcquireNextFrame(500, frame_info, &dxgi_res);
+
+		// Timeout will return when desktop has no chane
+		if (hr == DXGI_ERROR_WAIT_TIMEOUT) return AE_TIMEOUT;
+
+		if (FAILED(hr))
+			return AE_DUP_ACQUIRE_FRAME_FAILED;
+
+		// QI for IDXGIResource
+		hr = dxgi_res->QueryInterface(__uuidof(ID3D11Texture2D), reinterpret_cast<void **>(&_image));
+		dxgi_res->Release();
+		dxgi_res = nullptr;
+		if (FAILED(hr)) return AE_DUP_QI_FRAME_FAILED;
+
+		// Copy old description
+		D3D11_TEXTURE2D_DESC frame_desc;
+		_image->GetDesc(&frame_desc);
+
+		// Create a new staging buffer for fill frame image
+		ID3D11Texture2D *new_image = NULL;
+		frame_desc.Usage = D3D11_USAGE_STAGING;
+		frame_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE;
+		frame_desc.BindFlags = 0;
+		frame_desc.MiscFlags = 0;
+		frame_desc.MipLevels = 1;
+		frame_desc.ArraySize = 1;
+		frame_desc.SampleDesc.Count = 1;
+		frame_desc.SampleDesc.Quality = 0;
+		hr = _d3d_device->CreateTexture2D(&frame_desc, NULL, &new_image);
+		if (FAILED(hr)) return AE_DUP_CREATE_TEXTURE_FAILED;
+
+
+		// Copy next staging buffer to new staging buffer
+		_d3d_ctx->CopyResource(new_image, _image);
+
+#if 1 
+		// Should calc the row pitch ,and compare dst row pitch with frame row pitch
+		// Create staging buffer for map bits
+		IDXGISurface *dxgi_surface = NULL;
+		hr = new_image->QueryInterface(__uuidof(IDXGISurface), (void **)(&dxgi_surface));
+		new_image->Release();
+		if (FAILED(hr)) return AE_DUP_QI_DXGI_FAILED;
+
+		// Map buff to mapped rect structure
+		DXGI_MAPPED_RECT mapped_rect;
+		hr = dxgi_surface->Map(&mapped_rect, DXGI_MAP_READ);
+		if (FAILED(hr)) return AE_DUP_MAP_FAILED;
+
+		int dst_offset_x = _rect.left - _output_des.DesktopCoordinates.left;
+		int dst_offset_y = _rect.top - _output_des.DesktopCoordinates.top;
+		int dst_rowpitch = min(frame_desc.Width, _rect.right - _rect.left) * 4;
+		int dst_colpitch = min(_height, _output_des.DesktopCoordinates.bottom - _output_des.DesktopCoordinates.top - dst_offset_y);
+
+		for (int h = 0; h < dst_colpitch; h++) {
+			memcpy_s(_buffer + h*dst_rowpitch, dst_rowpitch,
+				(BYTE*)mapped_rect.pBits + (h + dst_offset_y)*mapped_rect.Pitch + dst_offset_x * 4, min(mapped_rect.Pitch, dst_rowpitch));
+		}
+
+
+		dxgi_surface->Unmap();
+
+		dxgi_surface->Release();
+		dxgi_surface = nullptr;
+
+#else
+
+		D3D11_MAPPED_SUBRESOURCE resource;
+		UINT subresource = D3D11CalcSubresource(0, 0, 0);
+
+		hr = _d3d_ctx->Map(new_image, subresource, D3D11_MAP_READ_WRITE, 0, &resource);
+		new_image->Release();
+		if (FAILED(hr)) return AE_DUP_MAP_FAILED;
+
+		int dst_rowpitch = frame_desc.Width * 4;
+		for (int h = 0; h < frame_desc.Height; h++) {
+			memcpy_s(_buffer + h*dst_rowpitch, dst_rowpitch, (BYTE*)resource.pData + h*resource.RowPitch, min(resource.RowPitch, dst_rowpitch));
+		}
+
+#endif
+
+		return AE_NO;
+	}
+
	// Updates the cached cursor state (_cursor_info) from the duplicated frame:
	// position/visibility first, then the shape bitmap when the frame carries one.
	// Returns AE_NO on success (including "nothing to update"), or an AE_* error.
	int record_desktop_duplication::get_desktop_cursor(const DXGI_OUTDUPL_FRAME_INFO *frame_info)
	{
		// A non-zero mouse update timestamp indicates that there is a mouse position update and optionally a shape change
		if (frame_info->LastMouseUpdateTime.QuadPart == 0)
			return AE_NO;

		bool b_updated = true;

		// Make sure we don't update pointer position wrongly
		// If pointer is invisible, make sure we did not get an update from another output that the last time that said pointer
		// was visible, if so, don't set it to invisible or update.
		if (!frame_info->PointerPosition.Visible && (_cursor_info.output_index != _output_index))
			b_updated = false;

		// If two outputs both say they have a visible, only update if new update has newer timestamp
		if (frame_info->PointerPosition.Visible && _cursor_info.visible && (_cursor_info.output_index != _output_index) && (_cursor_info.pre_timestamp.QuadPart > frame_info->LastMouseUpdateTime.QuadPart))
			b_updated = false;

		// Update position (translated into desktop coordinates of this output)
		if (b_updated)
		{
			_cursor_info.position.x = frame_info->PointerPosition.Position.x + _output_des.DesktopCoordinates.left;
			_cursor_info.position.y = frame_info->PointerPosition.Position.y + _output_des.DesktopCoordinates.top;
			_cursor_info.output_index = _output_index;
			_cursor_info.pre_timestamp = frame_info->LastMouseUpdateTime;
			_cursor_info.visible = frame_info->PointerPosition.Visible != 0;
		}

		// No new shape only update cursor positions & visible state
		if (frame_info->PointerShapeBufferSize == 0)
		{
			return AE_NO;
		}

		// Old buffer too small: reallocate before fetching the new shape
		if (frame_info->PointerShapeBufferSize > _cursor_info.size)
		{
			if (_cursor_info.buff)
			{
				delete[] _cursor_info.buff;
				_cursor_info.buff = nullptr;
			}
			_cursor_info.buff = new (std::nothrow) BYTE[frame_info->PointerShapeBufferSize];
			if (!_cursor_info.buff)
			{
				_cursor_info.size = 0;
				return AE_ALLOCATE_FAILED;
			}

			// Update buffer size
			_cursor_info.size = frame_info->PointerShapeBufferSize;
		}

		// Get shape bits into _cursor_info.buff; on failure drop the cache so
		// a stale shape is never drawn
		UINT BufferSizeRequired;
		HRESULT hr = _duplication->GetFramePointerShape(frame_info->PointerShapeBufferSize, reinterpret_cast<VOID*>(_cursor_info.buff), &BufferSizeRequired, &(_cursor_info.shape));
		if (FAILED(hr))
		{
			delete[] _cursor_info.buff;
			_cursor_info.buff = nullptr;
			_cursor_info.size = 0;
			return AE_DUP_GET_CURSORSHAPE_FAILED;
		}

		return AE_NO;
	}
+
+	static unsigned int bit_reverse(unsigned int n)
+	{
+
+		n = ((n >> 1) & 0x55555555) | ((n << 1) & 0xaaaaaaaa);
+
+		n = ((n >> 2) & 0x33333333) | ((n << 2) & 0xcccccccc);
+
+		n = ((n >> 4) & 0x0f0f0f0f) | ((n << 4) & 0xf0f0f0f0);
+
+		n = ((n >> 8) & 0x00ff00ff) | ((n << 8) & 0xff00ff00);
+
+		n = ((n >> 16) & 0x0000ffff) | ((n << 16) & 0xffff0000);
+
+		return n;
+	}
+
+	void record_desktop_duplication::draw_cursor()
+	{
+		if (_cursor_info.visible == false) return;
+
+		int cursor_width = 0, cursor_height = 0, left = 0, top = 0;
+
+		cursor_width = _cursor_info.shape.Width;
+		cursor_height = _cursor_info.shape.Height;
+
+		// In case that,the value of position is negative value
+		left = abs(_cursor_info.position.x - _rect.left);
+		top = abs(_cursor_info.position.y - _rect.top);
+
+		// Notice here
+		if (_cursor_info.shape.Type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME)
+			cursor_height = cursor_height / 2;
+
+		//Skip invisible pixel
+		cursor_width = min(_width - left, cursor_width);
+		cursor_height = min(_height - top, cursor_height);
+
+		//al_debug("left:%d top:%d width:%d height:%d type:%d", left, top, cursor_width, height, _cursor_info.shape.Type);
+
+		switch (_cursor_info.shape.Type)
+		{
+
+			// The pointer type is a color mouse pointer, 
+			// which is a color bitmap. The bitmap's size 
+			// is specified by width and height in a 32 bpp 
+			// ARGB DIB format.
+			// should trans cursor to BGRA?
+			case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_COLOR:
+			{
+				unsigned int *cursor_32 = reinterpret_cast<unsigned int*>(_cursor_info.buff);
+				unsigned int *screen_32 = reinterpret_cast<unsigned int*>(_buffer);
+
+				for (int row = 0; row < cursor_height; row++) {
+					for (int col = 0; col < cursor_width; col++) {
+						unsigned int cur_cursor_val = cursor_32[col + (row * (_cursor_info.shape.Pitch / sizeof(UINT)))];
+						
+						//Skip black or empty value
+						if (cur_cursor_val == 0x00000000)
+							continue;
+						else
+							screen_32[(abs(top) + row) *_width + abs(left) + col] = cur_cursor_val;//bit_reverse(cur_cursor_val);
+					}
+				}
+				break;
+			}
+
+			// The pointer type is a monochrome mouse pointer, 
+			// which is a monochrome bitmap. The bitmap's size 
+			// is specified by width and height in a 1 bits per 
+			// pixel (bpp) device independent bitmap (DIB) format 
+			// AND mask that is followed by another 1 bpp DIB format 
+			// XOR mask of the same size.
+			case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME:
+			{
+				unsigned int *cursor_32 = reinterpret_cast<unsigned int*>(_cursor_info.buff);
+				unsigned int *screen_32 = reinterpret_cast<unsigned int*>(_buffer);
+
+				for (int row = 0; row < cursor_height; row++) {
+					BYTE MASK = 0x80;
+					for (int col = 0; col < cursor_width; col++) {
+						// Get masks using appropriate offsets
+						BYTE AndMask = _cursor_info.buff[(col / 8) + (row  * (_cursor_info.shape.Pitch))] & MASK;
+						BYTE XorMask = _cursor_info.buff[(col / 8) + ((row + cursor_height) * (_cursor_info.shape.Pitch))] & MASK;
+						UINT AndMask32 = (AndMask) ? 0xFFFFFFFF : 0xFF000000;
+						UINT XorMask32 = (XorMask) ? 0x00FFFFFF : 0x00000000;
+
+						// Set new pixel
+						screen_32[(abs(top) + row) *_width + abs(left) + col] = (screen_32[(abs(top) + row) *_width + abs(left) + col] & AndMask32) ^ XorMask32;
+
+						// Adjust mask
+						if (MASK == 0x01)
+						{
+							MASK = 0x80;
+						}
+						else
+						{
+							MASK = MASK >> 1;
+						}
+					}
+				}
+				break;
+			}
+			// The pointer type is a masked color mouse pointer. 
+			// A masked color mouse pointer is a 32 bpp ARGB format 
+			// bitmap with the mask value in the alpha bits. The only 
+			// allowed mask values are 0 and 0xFF. When the mask value
+			// is 0, the RGB value should replace the screen pixel. 
+			// When the mask value is 0xFF, an XOR operation is performed 
+			// on the RGB value and the screen pixel; the result replaces the screen pixel.
+			case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR:
+			{
+				unsigned int *cursor_32 = reinterpret_cast<unsigned int*>(_cursor_info.buff);
+				unsigned int *screen_32 = reinterpret_cast<unsigned int*>(_buffer);
+
+				for (int row = 0; row < cursor_height; row++) {
+					for (int col = 0; col < cursor_width; col++) {
+						unsigned int cur_cursor_val = cursor_32[col + (row * (_cursor_info.shape.Pitch / sizeof(UINT)))];
+						unsigned int cur_screen_val = screen_32[(abs(top) + row) *_width + abs(left) + col];
+						unsigned int mask_val = 0xFF000000 & cur_cursor_val;
+
+						if (mask_val) {
+							//0xFF: XOR operation is performed on the RGB value and the screen pixel
+							cur_screen_val = (cur_screen_val ^ cur_cursor_val) | 0xFF000000;
+						}
+						else {
+							//0x00: the RGB value should replace the screen pixel
+							cur_screen_val = cur_cursor_val | 0xFF000000;
+						}
+					}
+				}
+				break;
+			}
+			default:
+				break;
+		}
+	}
+
	// Frame pacing for the capture loop.
	//   dur - target frame interval in microseconds (AV_TIME_BASE / fps)
	//   pre - timestamp of the previous frame, now - timestamp of this frame
	// When the previous iteration overran (delay > dur) the sleep is shortened
	// (never below 0); when it underran the sleep is stretched, so the average
	// interval stays close to dur.
	void record_desktop_duplication::do_sleep(int64_t dur, int64_t pre, int64_t now)
	{
		int64_t delay = now - pre;
		dur = delay > dur ? max(0, dur - (delay - dur)) : (dur + dur - delay);

		//al_debug("%lld", delay);

		if (dur)
			av_usleep(dur);
	}
+
	// Worker thread entry: repeatedly acquires duplicated desktop frames,
	// composites the cursor, wraps _buffer into an AVFrame (BGRA, relative-time
	// pts) and delivers it through _on_data at roughly _fps frames per second.
	// On a duplication failure it tears down and re-creates the duplication
	// interface until it succeeds or stop() clears _running.
	void record_desktop_duplication::record_func()
	{
		AVFrame *frame = av_frame_alloc();
		int64_t pre_pts = 0, dur = AV_TIME_BASE / _fps;

		int error = AE_NO;

#if 0
		if (attatch_desktop() != true) {
			al_fatal("duplication attach desktop failed :%s",
				system_error::error2str(GetLastError()).c_str());
			if (_on_error) _on_error(AE_DUP_ATTATCH_FAILED);
			return;
		}
#endif

		//Should init after desktop attatched
		//error = init_duplication();
		//if (error != AE_NO) {
		//	al_fatal("duplication initialize failed %s,last error :%s", err2str(error), 
		// system_error::error2str(GetLastError()).c_str());
		//	if (_on_error) _on_error(error);
		//	return;
		//}

		DXGI_OUTDUPL_FRAME_INFO frame_info;
		while (_running)
		{
			//Timeout is no new picture,no need to update
			if ((error = get_desktop_image(&frame_info)) == AE_TIMEOUT) continue;

			// Any other failure: retry-loop re-creating the duplication
			// (handles mode switches / access loss) until it works or we stop
			if (error != AE_NO) {
				while (_running)
				{
					Sleep(300);
					clean_duplication();
					if ((error = init_duplication()) != AE_NO) {
						if (_on_error) _on_error(error);
					}
					else break;
				}

				continue;
			}

			if ((error = get_desktop_cursor(&frame_info)) == AE_NO)
				draw_cursor();

			free_duplicated_frame();

			// Use the relative wall clock as pts so audio/video can be synced
			frame->pts = av_gettime_relative();
			frame->pkt_dts = frame->pts;
			frame->pkt_pts = frame->pts;

			frame->width = _width;
			frame->height = _height;
			frame->format = AV_PIX_FMT_BGRA;
			frame->pict_type = AV_PICTURE_TYPE_NONE;
			frame->pkt_size = _width * _height * 4;

			// Point the frame's data planes at _buffer (no copy)
			av_image_fill_arrays(frame->data,
				frame->linesize,
				_buffer,
				AV_PIX_FMT_BGRA,
				_width,
				_height,
				1
			);

#if 0
			//save bmp to test

			BITMAPINFOHEADER   bi;

			bi.biSize = sizeof(BITMAPINFOHEADER);
			bi.biWidth = _width;
			bi.biHeight = _height * (-1);
			bi.biPlanes = 1;
			bi.biBitCount = 32;//should get from system color bits
			bi.biCompression = BI_RGB;
			bi.biSizeImage = 0;
			bi.biXPelsPerMeter = 0;
			bi.biYPelsPerMeter = 0;
			bi.biClrUsed = 0;
			bi.biClrImportant = 0;

			BITMAPFILEHEADER bf;
			bf.bfType = 0x4d42;
			bf.bfReserved1 = 0;
			bf.bfReserved2 = 0;
			bf.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
			bf.bfSize = bf.bfOffBits + _width * _height * 4;

			FILE *fp = fopen("..\\..\\save.bmp", "wb+");

			fwrite(&bf, 1, sizeof(bf), fp);
			fwrite(&bi, 1, sizeof(bi), fp);
			fwrite(_buffer, 1, _buffer_size, fp);

			fflush(fp);
			fclose(fp);
#endif

			if (_on_data) _on_data(frame);

			// Pace the loop to the requested frame rate
			do_sleep(dur, pre_pts, frame->pts);

			pre_pts = frame->pts;
		}

		av_frame_free(&frame);
	}
+
+}

+ 92 - 0
libs/Recorder/record_desktop_duplication.h

@@ -0,0 +1,92 @@
+#ifndef RECORD_DESKTOP_DUPLICATION
+#define RECORD_DESKTOP_DUPLICATION
+
+#include "record_desktop.h"
+
+#include <Windows.h>
+#include <d3d11.h>
+#include <dxgi1_2.h>
+
+namespace am {
	// Cached state of the desktop mouse pointer, carried across duplicated
	// frames (a frame only carries a shape when it changed).
	typedef struct _PTR_INFO
	{
		_Field_size_bytes_(BufferSize) BYTE* buff;  // raw shape bits from GetFramePointerShape
		DXGI_OUTDUPL_POINTER_SHAPE_INFO shape;      // shape type / width / height / pitch
		POINT position;                             // cursor position in desktop coordinates
		bool visible;                               // whether the cursor should be drawn
		UINT size;                                  // allocated byte size of buff
		UINT output_index;                          // output that reported the last update
		LARGE_INTEGER pre_timestamp;                // timestamp of the last mouse update
	} DUPLICATION_CURSOR_INFO;
+
	// Desktop capture based on the DXGI desktop duplication API: grabs BGRA
	// frames of one output into a CPU buffer, composites the mouse cursor in
	// software and delivers frames through the record_desktop callbacks.
	class record_desktop_duplication:
		public record_desktop
	{
	public:
		record_desktop_duplication();
		~record_desktop_duplication();

		// Configure the capture rect and target frame rate.
		virtual int init(
			const RECORD_DESKTOP_RECT &rect,
			const int fps);

		virtual int start();
		virtual int pause();
		virtual int resume();
		virtual int stop();

	protected:
		virtual void clean_up();

	private:
		// Find the adapter whose output fully contains the capture rect.
		int get_dst_adapter(IDXGIAdapter **adapter);

		// Create the D3D11 device/context used for duplication.
		int create_d3d_device(IDXGIAdapter *adapter,ID3D11Device **device);

		int init_d3d11();

		void clean_d3d11();

		// Create the IDXGIOutputDuplication for _output_index.
		int init_duplication();

		// Release the currently acquired duplicated frame and its texture.
		int free_duplicated_frame();

		void clean_duplication();

		// Attach the worker thread to the current input desktop.
		bool attatch_desktop();

		// Acquire the next frame and copy its pixels into _buffer.
		int get_desktop_image(DXGI_OUTDUPL_FRAME_INFO *frame_info);

		// Update the cached cursor position/shape from the frame info.
		int get_desktop_cursor(const DXGI_OUTDUPL_FRAME_INFO *frame_info);

		// Composite the cached cursor into _buffer.
		void draw_cursor();

		// Frame pacing helper for the capture loop.
		void do_sleep(int64_t dur, int64_t pre, int64_t now);

		// Worker thread entry point.
		void record_func();

	private:
		uint8_t *_buffer;           // BGRA frame buffer handed to _on_data
		uint32_t _buffer_size;      // byte size of _buffer
		uint32_t _width, _height;   // capture size in pixels

		HMODULE _d3d11, _dxgi;      // dynamically loaded modules (presumably d3d11.dll / dxgi.dll — loaded outside this view)

		ID3D11Device* _d3d_device;
		ID3D11DeviceContext* _d3d_ctx;
		ID3D11VertexShader* _d3d_vshader;       // only used by the disabled shader path in init_d3d11
		ID3D11PixelShader* _d3d_pshader;
		ID3D11InputLayout* _d3d_inlayout;
		ID3D11SamplerState* _d3d_samplerlinear;

		IDXGIOutputDuplication *_duplication;
		ID3D11Texture2D *_image;    // texture of the currently acquired frame
		DXGI_OUTPUT_DESC _output_des;
		
		int _output_index;          // index of the duplicated output on its adapter
		DUPLICATION_CURSOR_INFO _cursor_info;
	};
+
+}
+
+#endif

+ 53 - 0
libs/Recorder/record_desktop_factory.cpp

@@ -0,0 +1,53 @@
+#include "record_desktop_factory.h"
+#include "record_desktop_ffmpeg_gdi.h"
+#include "record_desktop_ffmpeg_dshow.h"
+#include "record_desktop_gdi.h"
+#include "record_desktop_duplication.h"
+#include "record_desktop_wgc.h"
+#include "record_desktop_mag.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+int record_desktop_new(RECORD_DESKTOP_TYPES type, am::record_desktop ** recorder)
+{
+	int err = AE_NO;
+	switch (type)
+	{
+	case DT_DESKTOP_FFMPEG_GDI:
+		*recorder = (am::record_desktop*)new am::record_desktop_ffmpeg_gdi();
+		break;
+	case DT_DESKTOP_FFMPEG_DSHOW:
+		*recorder = (am::record_desktop*)new am::record_desktop_ffmpeg_dshow();
+		break;
+	case DT_DESKTOP_WIN_GDI:
+		*recorder = (am::record_desktop*)new am::record_desktop_gdi();
+		break;
+	case DT_DESKTOP_WIN_DUPLICATION:
+		*recorder = (am::record_desktop*)new am::record_desktop_duplication();
+		break;
+  case DT_DESKTOP_WIN_WGC:
+    *recorder =
+        (am::record_desktop *)new am::record_desktop_wgc();
+    break;
+  case DT_DESKTOP_WIN_MAG:
+    *recorder = (am::record_desktop *)new am::record_desktop_mag();
+    break;
+	default:
+		err = AE_UNSUPPORT;
+		break;
+	}
+
+	return err;
+}
+
+void record_desktop_destroy(am::record_desktop ** recorder)
+{
+	if (*recorder != nullptr) {
+		(*recorder)->stop();
+
+		delete *recorder;
+	}
+
+	*recorder = nullptr;
+}

+ 10 - 0
libs/Recorder/record_desktop_factory.h

@@ -0,0 +1,10 @@
#ifndef RECORD_DESKTOP_FACTORY
#define RECORD_DESKTOP_FACTORY

#include "record_desktop.h"

// Creates the concrete am::record_desktop implementation for the given
// capture type; *recorder receives ownership. Returns AE_NO on success or
// AE_UNSUPPORT for an unknown type.
int record_desktop_new(RECORD_DESKTOP_TYPES type, am::record_desktop **recorder);

// Stops and deletes a recorder created by record_desktop_new, then nulls *recorder.
void record_desktop_destroy(am::record_desktop **recorder);

#endif

+ 235 - 0
libs/Recorder/record_desktop_ffmpeg_dshow.cpp

@@ -0,0 +1,235 @@
+#include "record_desktop_ffmpeg_dshow.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+
+namespace am {
+
	// Registers ffmpeg formats/devices (legacy av_register_all API) and puts
	// all demuxer/decoder state into a known-empty state; real setup happens
	// in init().
	record_desktop_ffmpeg_dshow::record_desktop_ffmpeg_dshow()
	{
		av_register_all();
		avdevice_register_all();

		_fmt_ctx = NULL;
		_input_fmt = NULL;
		_codec_ctx = NULL;
		_codec = NULL;

		_stream_index = -1;
		_data_type = RECORD_DESKTOP_DATA_TYPES::AT_DESKTOP_RGBA;
	}
+
+
	// Stops the capture thread first, then frees all ffmpeg contexts.
	record_desktop_ffmpeg_dshow::~record_desktop_ffmpeg_dshow()
	{
		stop();
		clean_up();
	}
+
	// Opens the dshow screen-capture device ("video=screen-capture-recorder")
	// for the given capture rect and frame rate, finds its video stream and
	// opens a decoder for it. Idempotent: returns AE_NO immediately when
	// already initialized. On failure all partially created state is torn
	// down via clean_up().
	int record_desktop_ffmpeg_dshow::init(const RECORD_DESKTOP_RECT & rect, const int fps)
	{
		int error = AE_NO;
		if (_inited == true) {
			return error;
		}

		_fps = fps;
		_rect = rect;

		// Capture size as "WxH" for the video_size option
		char buff_video_size[50] = { 0 };
		sprintf_s(buff_video_size, 50, "%dx%d", rect.right - rect.left, rect.bottom - rect.top);

		// Device options: frame rate, capture origin/size and cursor drawing
		AVDictionary *options = NULL;
		av_dict_set_int(&options, "framerate", fps, AV_DICT_MATCH_CASE);
		av_dict_set_int(&options, "offset_x", rect.left, AV_DICT_MATCH_CASE);
		av_dict_set_int(&options, "offset_y", rect.top, AV_DICT_MATCH_CASE);
		av_dict_set(&options, "video_size", buff_video_size, AV_DICT_MATCH_CASE);
		av_dict_set_int(&options, "draw_mouse", 1, AV_DICT_MATCH_CASE);

		int ret = 0;
		do {
			_fmt_ctx = avformat_alloc_context();
			_input_fmt = av_find_input_format("dshow");

			//the framerate must be same like encoder & muxer 's framerate,otherwise the video can not sync with audio
			ret = avformat_open_input(&_fmt_ctx, "video=screen-capture-recorder", _input_fmt, &options);
			if (ret != 0) {
				error = AE_FFMPEG_OPEN_INPUT_FAILED;
				break;
			}

			ret = avformat_find_stream_info(_fmt_ctx, NULL);
			if (ret < 0) {
				error = AE_FFMPEG_FIND_STREAM_FAILED;
				break;
			}

			// Locate the first video stream of the device
			int stream_index = -1;
			for (int i = 0; i < _fmt_ctx->nb_streams; i++) {
				if (_fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
					stream_index = i;
					break;
				}
			}

			if (stream_index == -1) {
				error = AE_FFMPEG_FIND_STREAM_FAILED;
				break;
			}

			// Open a decoder for the stream (uses the deprecated
			// AVStream::codec context, matching the rest of this file)
			_stream_index = stream_index;
			_codec_ctx = _fmt_ctx->streams[stream_index]->codec;
			_codec = avcodec_find_decoder(_codec_ctx->codec_id);
			if (_codec == NULL) {
				error = AE_FFMPEG_FIND_DECODER_FAILED;
				break;
			}

			ret = avcodec_open2(_codec_ctx, _codec, NULL);
			if (ret != 0) {
				error = AE_FFMPEG_OPEN_CODEC_FAILED;
				break;
			}

			// Cache stream timing/pixel format for downstream consumers
			_start_time = _fmt_ctx->streams[_stream_index]->start_time;
			_time_base = _fmt_ctx->streams[_stream_index]->time_base;
			_pixel_fmt = _fmt_ctx->streams[_stream_index]->codec->pix_fmt;

			_inited = true;
		} while (0);

		if (error != AE_NO) {
			al_debug("%s,error: %d %lu", err2str(error), ret, GetLastError());
			clean_up();
		}

		av_dict_free(&options);

		return error;
	}
+
+	int record_desktop_ffmpeg_dshow::start()
+	{
+		if (_running == true) {
+			al_warn("record desktop gdi is already running");
+			return AE_NO;
+		}
+
+		if (_inited == false) {
+			return AE_NEED_INIT;
+		}
+
+		_running = true;
+		_thread = std::thread(std::bind(&record_desktop_ffmpeg_dshow::record_func, this));
+
+		return AE_NO;
+	}
+
+	int record_desktop_ffmpeg_dshow::pause()
+	{
+		return 0;
+	}
+
+	int record_desktop_ffmpeg_dshow::resume()
+	{
+		return 0;
+	}
+
+	int record_desktop_ffmpeg_dshow::stop()
+	{
+		_running = false;
+		if (_thread.joinable())
+			_thread.join();
+
+		return AE_NO;
+	}
+
+	void record_desktop_ffmpeg_dshow::clean_up()
+	{
+		if (_codec_ctx)
+			avcodec_close(_codec_ctx);
+
+		if (_fmt_ctx)
+			avformat_close_input(&_fmt_ctx);
+
+		_fmt_ctx = NULL;
+		_input_fmt = NULL;
+		_codec_ctx = NULL;
+		_codec = NULL;
+
+		_stream_index = -1;
+		_inited = false;
+	}
+
	// Sends one packet to the decoder (packet == NULL flushes) and forwards
	// every decoded frame to _on_data. Returns AE_NO on success or an
	// AE_FFMPEG_* error code.
	int record_desktop_ffmpeg_dshow::decode(AVFrame * frame, AVPacket * packet)
	{
		int ret = avcodec_send_packet(_codec_ctx, packet);
		if (ret < 0) {
			al_error("avcodec_send_packet failed:%d", ret);

			return AE_FFMPEG_DECODE_FRAME_FAILED;
		}

		// Drain every frame produced by this packet (send/receive API may
		// emit zero or more frames per packet)
		while (ret >=0)
		{
			ret = avcodec_receive_frame(_codec_ctx, frame);
			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
				break;
			}

			if (ret < 0) {
				return AE_FFMPEG_READ_FRAME_FAILED;
			}

			if (ret == 0 && _on_data)
				_on_data(frame);

			av_frame_unref(frame);//need to do this? avcodec_receive_frame said will call unref before receive
		}

		return AE_NO;
	}
+
+	void record_desktop_ffmpeg_dshow::record_func()
+	{
+		AVPacket *packet = av_packet_alloc();
+		AVFrame *frame = av_frame_alloc();
+
+		int ret = 0;
+
+		int got_pic = 0;
+		while (_running == true) {
+
+			av_init_packet(packet);
+
+			ret = av_read_frame(_fmt_ctx, packet);
+
+			if (ret < 0) {
+				if (_on_error) _on_error(AE_FFMPEG_READ_FRAME_FAILED);
+
+				al_fatal("read frame failed:%d", ret);
+				break;
+			}
+
+			if (packet->stream_index == _stream_index) {
+				
+				ret = decode(frame, packet);
+				if (ret != AE_NO) {
+					if (_on_error) _on_error(AE_FFMPEG_DECODE_FRAME_FAILED);
+					al_fatal("decode desktop frame failed");
+					break;
+				}
+			}
+
+			av_packet_unref(packet);
+		}
+
+		//flush packet in decoder
+		decode(frame, NULL);
+
+		av_packet_free(&packet);
+		av_frame_free(&frame);
+	}
+
+}

+ 37 - 0
libs/Recorder/record_desktop_ffmpeg_dshow.h

@@ -0,0 +1,37 @@
+#pragma once
+
+#include "record_desktop.h"
+
+namespace am {
+
	// Desktop capture via ffmpeg's "dshow" input device
	// ("video=screen-capture-recorder"); reported data type is RGBA.
	class record_desktop_ffmpeg_dshow:public record_desktop
	{
	public:
		record_desktop_ffmpeg_dshow();
		~record_desktop_ffmpeg_dshow();

		// Open the dshow device for the given rect/fps and prepare a decoder.
		virtual int init(
			const RECORD_DESKTOP_RECT &rect,
			const int fps);

		virtual int start();
		virtual int pause();
		virtual int resume();
		virtual int stop();

	protected:
		virtual void clean_up();

	private:
		// Send one packet to the decoder and forward all resulting frames.
		int decode(AVFrame *frame, AVPacket *packet);

		// Worker thread entry: read/decode loop.
		void record_func();

		int _stream_index;          // video stream index inside _fmt_ctx
		AVFormatContext *_fmt_ctx;  // demuxer context for the dshow device
		AVInputFormat *_input_fmt;  // the "dshow" input format
		AVCodecContext *_codec_ctx; // decoder context (from the stream)
		AVCodec *_codec;            // decoder matching the stream's codec id
	};
+
+}

+ 238 - 0
libs/Recorder/record_desktop_ffmpeg_gdi.cpp

@@ -0,0 +1,238 @@
+#include "record_desktop_ffmpeg_gdi.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+
+namespace am {
+
	// Registers ffmpeg formats/devices (legacy av_register_all API) and puts
	// all demuxer/decoder state into a known-empty state; real setup happens
	// in init().
	record_desktop_ffmpeg_gdi::record_desktop_ffmpeg_gdi()
	{
		av_register_all();
		avdevice_register_all();

		_fmt_ctx = NULL;
		_input_fmt = NULL;
		_codec_ctx = NULL;
		_codec = NULL;

		_stream_index = -1;
		_data_type = RECORD_DESKTOP_DATA_TYPES::AT_DESKTOP_RGBA;
	}
+
+
	// Stops the capture thread first, then frees all ffmpeg contexts.
	record_desktop_ffmpeg_gdi::~record_desktop_ffmpeg_gdi()
	{
		stop();
		clean_up();
	}
+
	// Opens ffmpeg's "gdigrab" input for the "desktop" target with the given
	// capture rect and frame rate, finds its video stream and opens a decoder.
	// Idempotent: returns AE_NO immediately when already initialized. On
	// failure all partially created state is torn down via clean_up().
	int record_desktop_ffmpeg_gdi::init(const RECORD_DESKTOP_RECT & rect, const int fps)
	{
		int error = AE_NO;
		if (_inited == true) {
			return error;
		}

		_fps = fps;
		_rect = rect;

		// Capture size as "WxH" for the video_size option
		char buff_video_size[50] = { 0 };
		sprintf_s(buff_video_size, 50, "%dx%d", rect.right - rect.left, rect.bottom - rect.top);

		// gdigrab options: frame rate, capture origin/size and cursor drawing
		AVDictionary *options = NULL;
		av_dict_set_int(&options, "framerate", fps, AV_DICT_MATCH_CASE);
		av_dict_set_int(&options, "offset_x", rect.left, AV_DICT_MATCH_CASE);
		av_dict_set_int(&options, "offset_y", rect.top, AV_DICT_MATCH_CASE);
		av_dict_set(&options, "video_size", buff_video_size, AV_DICT_MATCH_CASE);
		av_dict_set_int(&options, "draw_mouse", 1, AV_DICT_MATCH_CASE);

		int ret = 0;
		do {
			_fmt_ctx = avformat_alloc_context();
			_input_fmt = av_find_input_format("gdigrab");

			//the framerate must be same like encoder & muxer 's framerate,otherwise the video can not sync with audio
			ret = avformat_open_input(&_fmt_ctx, "desktop", _input_fmt, &options);
			if (ret != 0) {
				error = AE_FFMPEG_OPEN_INPUT_FAILED;
				break;
			}

			ret = avformat_find_stream_info(_fmt_ctx, NULL);
			if (ret < 0) {
				error = AE_FFMPEG_FIND_STREAM_FAILED;
				break;
			}

			// Locate the first video stream
			int stream_index = -1;
			for (int i = 0; i < _fmt_ctx->nb_streams; i++) {
				if (_fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
					stream_index = i;
					break;
				}
			}

			if (stream_index == -1) {
				error = AE_FFMPEG_FIND_STREAM_FAILED;
				break;
			}

			// Open a decoder for the stream (uses the deprecated
			// AVStream::codec context, matching the rest of this file)
			_stream_index = stream_index;
			_codec_ctx = _fmt_ctx->streams[stream_index]->codec;
			_codec = avcodec_find_decoder(_codec_ctx->codec_id);
			if (_codec == NULL) {
				error = AE_FFMPEG_FIND_DECODER_FAILED;
				break;
			}

			ret = avcodec_open2(_codec_ctx, _codec, NULL);
			if (ret != 0) {
				error = AE_FFMPEG_OPEN_CODEC_FAILED;
				break;
			}

			// Cache stream timing/pixel format for downstream consumers
			_start_time = _fmt_ctx->streams[_stream_index]->start_time;
			_time_base = _fmt_ctx->streams[_stream_index]->time_base;
			_pixel_fmt = _fmt_ctx->streams[_stream_index]->codec->pix_fmt;

			_inited = true;
		} while (0);

		if (error != AE_NO) {
			al_debug("%s,error: %d %lu", err2str(error), ret, GetLastError());
			clean_up();
		}

		av_dict_free(&options);

		return error;
	}
+
+	int record_desktop_ffmpeg_gdi::start()
+	{
+		if (_running == true) {
+			al_warn("record desktop gdi is already running");
+			return AE_NO;
+		}
+
+		if (_inited == false) {
+			return AE_NEED_INIT;
+		}
+
+		_running = true;
+		_thread = std::thread(std::bind(&record_desktop_ffmpeg_gdi::record_func, this));
+
+		return AE_NO;
+	}
+
	// Flags the recorder as paused (the flag only; no thread is suspended here).
	int record_desktop_ffmpeg_gdi::pause()
	{
		_paused = true;
		return AE_NO;
	}
+
	// Clears the paused flag set by pause().
	int record_desktop_ffmpeg_gdi::resume()
	{
		_paused = false;
		return AE_NO;
	}
+
+	int record_desktop_ffmpeg_gdi::stop()
+	{
+		_running = false;
+		if (_thread.joinable())
+			_thread.join();
+
+		return AE_NO;
+	}
+
+	void record_desktop_ffmpeg_gdi::clean_up()
+	{
+		if (_codec_ctx)
+			avcodec_close(_codec_ctx);
+
+		if (_fmt_ctx)
+			avformat_close_input(&_fmt_ctx);
+
+		_fmt_ctx = NULL;
+		_input_fmt = NULL;
+		_codec_ctx = NULL;
+		_codec = NULL;
+
+		_stream_index = -1;
+		_inited = false;
+	}
+
	// Sends one packet to the decoder (packet == NULL flushes) and forwards
	// every decoded frame to _on_data, re-stamping pts with the relative wall
	// clock so audio/video can be synced downstream. Returns AE_NO on success
	// or an AE_FFMPEG_* error code.
	int record_desktop_ffmpeg_gdi::decode(AVFrame * frame, AVPacket * packet)
	{
		int ret = avcodec_send_packet(_codec_ctx, packet);
		if (ret < 0) {
			al_error("avcodec_send_packet failed:%d", ret);

			return AE_FFMPEG_DECODE_FRAME_FAILED;
		}

		// Drain every frame produced by this packet (send/receive API may
		// emit zero or more frames per packet)
		while (ret >= 0)
		{
			ret = avcodec_receive_frame(_codec_ctx, frame);
			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
				break;
			}

			if (ret < 0) {
				return AE_FFMPEG_READ_FRAME_FAILED;
			}

			if (ret == 0 && _on_data) {
				//use relative time instead of device time
				frame->pts = av_gettime_relative();// -_start_time;
				frame->pkt_dts = frame->pts;
				_on_data(frame);
			}

			av_frame_unref(frame);//need to do this? avcodec_receive_frame said will call unref before receive
		}

		return AE_NO;
	}
+
+	void record_desktop_ffmpeg_gdi::record_func()
+	{
+		AVPacket *packet = av_packet_alloc();
+		AVFrame *frame = av_frame_alloc();
+
+		int ret = 0;
+
+		int got_pic = 0;
+		while (_running == true) {
+			ret = av_read_frame(_fmt_ctx, packet);
+
+			if (ret < 0) {
+				if (_on_error) _on_error(AE_FFMPEG_READ_FRAME_FAILED);
+
+				al_fatal("read frame failed:%d", ret);
+				break;
+			}
+
+			if (packet->stream_index == _stream_index) {
+
+				ret = decode(frame, packet);
+				if (ret != AE_NO) {
+					if (_on_error) _on_error(AE_FFMPEG_DECODE_FRAME_FAILED);
+					al_fatal("decode desktop frame failed");
+					break;
+				}
+			}
+
+			av_packet_unref(packet);
+		}
+
+		//flush packet in decoder
+		decode(frame, NULL);
+
+		av_packet_free(&packet);
+		av_frame_free(&frame);
+	}
+
+}

+ 39 - 0
libs/Recorder/record_desktop_ffmpeg_gdi.h

@@ -0,0 +1,39 @@
+#ifndef RECORD_DESKTOP_FFMPEG_GDI
+#define RECORD_DESKTOP_FFMPEG_GDI
+
+#include "record_desktop.h"
+
+namespace am {
+
+	// Desktop capturer driven by an FFmpeg input device (presumably GDI
+	// screen grabbing, given the name - init() in the .cpp holds the
+	// details): av_read_frame() pulls raw desktop packets which are then
+	// decoded into AVFrames and handed to the base-class data callback.
+	class record_desktop_ffmpeg_gdi :public record_desktop
+	{
+	public:
+		record_desktop_ffmpeg_gdi();
+		~record_desktop_ffmpeg_gdi();
+
+		virtual int init(
+			const RECORD_DESKTOP_RECT &rect,
+			const int fps);
+		
+		virtual int start();
+		virtual int pause();
+		virtual int resume();
+		virtual int stop();
+
+	protected:
+		virtual void clean_up();
+
+	private:
+		// Decode one packet (NULL flushes) and emit frames via _on_data.
+		int decode(AVFrame *frame, AVPacket *packet);
+
+		// Capture-thread body (read -> decode loop).
+		void record_func();
+
+		// Index of the desktop video stream inside _fmt_ctx (-1 = unset).
+		int _stream_index;
+		AVFormatContext *_fmt_ctx;
+		AVInputFormat *_input_fmt;
+		AVCodecContext *_codec_ctx;
+		AVCodec *_codec;
+	};
+
+}
+#endif

+ 300 - 0
libs/Recorder/record_desktop_gdi.cpp

@@ -0,0 +1,300 @@
+#include "record_desktop_gdi.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+namespace am {
+
+	// Initialize all members to a safe empty state; real setup is in init().
+	record_desktop_gdi::record_desktop_gdi()
+	{
+		_data_type = RECORD_DESKTOP_DATA_TYPES::AT_DESKTOP_BGRA;
+		_buffer = NULL;
+		_buffer_size = 0;
+
+		_draw_cursor = true;
+
+		_hdc = NULL;
+		_bmp = NULL;
+		_bmp_old = NULL;
+		_ci = { 0 };
+	}
+
+	// Stop the capture thread (if running) and free the frame buffer.
+	record_desktop_gdi::~record_desktop_gdi()
+	{
+		stop();
+		clean_up();
+	}
+
+	// Allocate the BGRA frame buffer for the requested capture rect and
+	// record the timing parameters. Idempotent: returns AE_NO immediately
+	// if already initialized.
+	int record_desktop_gdi::init(const RECORD_DESKTOP_RECT & rect, const int fps)
+	{
+		int error = AE_NO;
+		if (_inited == true) {
+			return error;
+		}
+
+		_fps = fps;
+		_rect = rect;
+
+
+		do {
+			_width = rect.right - rect.left;
+			_height = rect.bottom - rect.top;
+			// Row size of a 32bpp DIB rounded up to whole DWORDs; for 32bpp
+			// this works out to exactly _width * _height * 4 bytes.
+			_buffer_size = (_width * 32 + 31) / 32 * _height * 4;
+			_buffer = new uint8_t[_buffer_size];
+
+			_start_time = av_gettime_relative();
+			_time_base = { 1,AV_TIME_BASE };
+			_pixel_fmt = AV_PIX_FMT_BGRA;
+
+
+			_inited = true;
+		} while (0);
+
+		// error is never set inside the do/while today, so this always logs
+		// success; kept for symmetry with the other capturers.
+		al_info("init gdi finished,error: %s %ld", err2str(error), GetLastError());
+
+		return error;
+	}
+
+	// Spawn the capture thread. Returns AE_NO if already running (with a
+	// warning) and AE_NEED_INIT if init() has not succeeded yet.
+	int record_desktop_gdi::start()
+	{
+		if (_running) {
+			al_warn("record desktop gdi is already running");
+			return AE_NO;
+		}
+
+		if (!_inited)
+			return AE_NEED_INIT;
+
+		_running = true;
+		_thread = std::thread([this]() { record_func(); });
+
+		return AE_NO;
+	}
+
+	// NOTE(review): record_func() in this file never consults _paused, so
+	// pausing only sets the flag without suspending frame delivery -
+	// confirm whether the base class or callers rely on this flag.
+	int record_desktop_gdi::pause()
+	{
+		_paused = true;
+		return AE_NO;
+	}
+
+	int record_desktop_gdi::resume()
+	{
+		_paused = false;
+		return AE_NO;
+	}
+
+	// Ask the capture loop to exit and wait for the worker thread to
+	// finish. Safe to call when the thread was never started.
+	int record_desktop_gdi::stop()
+	{
+		_running = false;
+
+		if (_thread.joinable()) {
+			_thread.join();
+		}
+
+		return AE_NO;
+	}
+
+	// Free the frame buffer and drop back to the not-initialized state.
+	void record_desktop_gdi::clean_up()
+	{
+		_inited = false;
+
+		// delete[] on a null pointer is a no-op, so no guard is needed.
+		delete[] _buffer;
+		_buffer = nullptr;
+	}
+
+	// Paint the current mouse cursor into the capture DC so it appears in
+	// the recorded frame (BitBlt alone never captures the cursor). Uses the
+	// CURSORINFO snapshot stored in _ci by do_record().
+	void record_desktop_gdi::draw_cursor(HDC hdc)
+	{
+		if (!(_ci.flags & CURSOR_SHOWING))
+			return;
+
+		//is cursor in the target zone
+		if (_ci.ptScreenPos.x < _rect.left ||
+			_ci.ptScreenPos.x > _rect.right ||
+			_ci.ptScreenPos.y < _rect.top ||
+			_ci.ptScreenPos.y > _rect.bottom
+			)
+			return;
+
+		HICON icon;
+		ICONINFO ii;
+
+		// Copy the handle: the shared system cursor must not be destroyed.
+		icon = CopyIcon(_ci.hCursor);
+		if (!icon)
+			return;
+
+		// The rect check above guarantees these are non-negative.
+		int dstx = _ci.ptScreenPos.x - _rect.left;
+		int dsty = _ci.ptScreenPos.y - _rect.top;
+
+		if (GetIconInfo(icon, &ii)) {
+			// Offset by the hotspot (the "click point" inside the cursor
+			// image, e.g. the arrow tip) so the cursor lands exactly where
+			// the user sees it; without this the image is shifted.
+			DrawIconEx(hdc, dstx - (int)ii.xHotspot, dsty - (int)ii.yHotspot,
+				icon, 0, 0, 0, NULL, DI_NORMAL);
+
+			// GetIconInfo hands out bitmap copies we must free; hbmColor may
+			// be NULL for monochrome cursors, which DeleteObject tolerates.
+			DeleteObject(ii.hbmColor);
+			DeleteObject(ii.hbmMask);
+		}
+
+		DestroyIcon(icon);
+	}
+
+	// Grab one frame of the target rect into _buffer via GDI:
+	// BitBlt screen -> memory DC, draw the cursor on top, then read the
+	// pixels out with GetDIBits as a top-down 32bpp DIB.
+	// Returns AE_NO on success or an AE_GDI_* error code. A failed BitBlt
+	// is deliberately tolerated (returns AE_NO) because UAC prompts can
+	// make it fail transiently with an invalid-handle error.
+	int record_desktop_gdi::do_record()
+	{
+		HDC hdc_screen = NULL, hdc_mem = NULL;
+		HBITMAP hbm_mem = NULL;
+		HGDIOBJ bmp_old = NULL;
+
+		int error = AE_ERROR;
+
+		do {
+
+			hdc_screen = GetWindowDC(NULL);
+			if (!hdc_screen) {
+				al_error("get window dc failed:%lu", GetLastError());
+				error = AE_GDI_GET_DC_FAILED;
+				break;
+			}
+
+			hdc_mem = CreateCompatibleDC(hdc_screen);
+			if (!hdc_mem) {
+				al_error("create compatible dc failed:%lu", GetLastError());
+				error = AE_GDI_CREATE_DC_FAILED;
+				break;
+			}
+
+			hbm_mem = CreateCompatibleBitmap(hdc_screen, _width, _height);
+			if (!hbm_mem) {
+				al_error("create compatible bitmap failed:%lu", GetLastError());
+				error = AE_GDI_CREATE_BMP_FAILED;
+				break;
+			}
+
+			// Remember the DC's stock bitmap so it can be restored before
+			// our bitmap is deleted (deleting a selected bitmap leaks it).
+			bmp_old = SelectObject(hdc_mem, hbm_mem);
+
+			//must have CAPTUREBLT flag,otherwise some layered window can not be captured
+			if (!BitBlt(hdc_mem, 0, 0, _width, _height, hdc_screen, _rect.left, _rect.top, SRCCOPY | CAPTUREBLT)) {
+				al_error("bitblt data failed:%lu", GetLastError());
+				//administrator UAC will trigger invalid handle error, so
+				//treat a failed blit as a skipped frame, not a fatal error
+				error = AE_NO;
+				break;
+			}
+
+			memset(&_ci, 0, sizeof(CURSORINFO));
+			_ci.cbSize = sizeof(CURSORINFO);
+			if (GetCursorInfo(&_ci)) {
+				draw_cursor(hdc_mem);
+			}
+
+			BITMAPINFOHEADER   bi;
+
+			bi.biSize = sizeof(BITMAPINFOHEADER);
+			bi.biWidth = _width;
+			// Negative height requests a top-down DIB (first row = top row).
+			bi.biHeight = _height * (-1);
+			bi.biPlanes = 1;
+			bi.biBitCount = 32;//should get from system color bits
+			bi.biCompression = BI_RGB;
+			bi.biSizeImage = 0;
+			bi.biXPelsPerMeter = 0;
+			bi.biYPelsPerMeter = 0;
+			bi.biClrUsed = 0;
+			bi.biClrImportant = 0;
+
+			//scan colors by line order
+			// NOTE(review): GetDIBits returns the number of scan lines
+			// copied; comparing that count against ERROR_INVALID_PARAMETER
+			// (87) looks like a misunderstanding of the API - confirm.
+			int ret = GetDIBits(hdc_mem, hbm_mem, 0, _height, _buffer, (BITMAPINFO*)&bi, DIB_RGB_COLORS);
+			if (ret <= 0 || ret == ERROR_INVALID_PARAMETER) {
+				al_error("get dibits failed:%lu", GetLastError());
+				error = AE_GDI_GET_DIBITS_FAILED;
+				break;
+			}
+
+#if 0
+			//save bmp to test
+			BITMAPFILEHEADER bf;
+			bf.bfType = 0x4d42;
+			bf.bfReserved1 = 0;
+			bf.bfReserved2 = 0;
+			bf.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
+			bf.bfSize = bf.bfOffBits + _width * _height * 4;
+
+			FILE *fp = fopen("..\\..\\save.bmp", "wb+");
+
+			fwrite(&bf, 1, sizeof(bf), fp);
+			fwrite(&bi, 1, sizeof(bi), fp);
+			fwrite(_buffer, 1, _buffer_size, fp);
+
+			fflush(fp);
+			fclose(fp);
+#endif
+			error = AE_NO;
+		} while (0);
+
+		// Restore the stock bitmap before deleting ours.
+		if (hdc_mem && bmp_old)
+			SelectObject(hdc_mem, bmp_old);
+
+		if(hbm_mem)
+			DeleteObject(hbm_mem);
+
+		// Memory DCs must be released with DeleteDC (DeleteObject cannot
+		// delete a DC and silently leaked it here before).
+		if(hdc_mem)
+			DeleteDC(hdc_mem);
+
+		if(hdc_screen)
+			ReleaseDC(NULL, hdc_screen);
+
+		// Previously this always returned AE_NO, which hid every failure
+		// from record_func and its _on_error callback.
+		return error;
+	}
+
+	// Frame pacing: dur is the target frame interval (us), pre/now are the
+	// previous and current frame timestamps. If the last iteration ran long
+	// we sleep less (clamped at 0, compensating the overshoot); if it ran
+	// short we sleep longer, keeping the average rate at _fps.
+	// Note: max() here is the Windows macro from <windows.h>.
+	void record_desktop_gdi::do_sleep(int64_t dur, int64_t pre, int64_t now)
+	{
+		int64_t delay = now - pre;
+		dur = delay > dur ? max(0, dur - (delay - dur)) : (dur + dur - delay);
+
+		//al_debug("%lld", delay);
+
+		if(dur)
+			av_usleep(dur);
+	}
+
+	// Capture-thread entry point: grab a frame via do_record(), wrap the
+	// shared _buffer in an AVFrame (no copy - av_image_fill_arrays only
+	// points frame->data at _buffer) and hand it to _on_data, then sleep
+	// to hold the requested frame rate.
+	void record_desktop_gdi::record_func()
+	{
+		AVFrame *frame = av_frame_alloc();
+
+		int64_t pre_pts = 0;
+		// Target frame interval in microseconds.
+		int64_t dur = AV_TIME_BASE / _fps;
+
+		int ret = AE_NO;
+		while (_running)
+		{
+			ret = do_record();
+			if (ret != AE_NO) {
+				if (_on_error) _on_error(ret);
+				break;
+			}
+
+			frame->pts = av_gettime_relative();
+			frame->pkt_dts = frame->pts;
+
+			frame->width = _width;
+			frame->height = _height;
+			frame->format = AV_PIX_FMT_BGRA;
+			frame->pict_type = AV_PICTURE_TYPE_I;
+			frame->pkt_size = _width * _height * 4;
+
+			// Points frame->data/linesize at _buffer; the callback must
+			// consume or copy the pixels before the next iteration.
+			av_image_fill_arrays(frame->data, 
+				frame->linesize, 
+				_buffer,
+				AV_PIX_FMT_BGRA,
+				_width,
+				_height,
+				1
+			);
+
+			if (_on_data) _on_data(frame);
+
+			do_sleep(dur, pre_pts, frame->pts);
+
+			pre_pts = frame->pts;
+		}
+
+		av_frame_free(&frame);
+	}
+
+
+}

+ 51 - 0
libs/Recorder/record_desktop_gdi.h

@@ -0,0 +1,51 @@
+#ifndef RECORD_DESKTOP_GDI
+#define RECORD_DESKTOP_GDI
+
+#include "record_desktop.h"
+
+#include <Windows.h>
+
+namespace am {
+
+	// Desktop capturer using plain GDI (BitBlt + GetDIBits) on a worker
+	// thread; frames are delivered as BGRA via the base-class callback.
+	class record_desktop_gdi :
+		public record_desktop
+	{
+	public:
+		record_desktop_gdi();
+		~record_desktop_gdi();
+
+		virtual int init(
+			const RECORD_DESKTOP_RECT &rect,
+			const int fps);
+
+		virtual int start();
+		virtual int pause();
+		virtual int resume();
+		virtual int stop();
+
+	protected:
+		virtual void clean_up();
+
+	private:
+		// Paints the cursor (from _ci) into the capture DC.
+		void draw_cursor(HDC hdc);
+
+		// Captures one frame into _buffer; returns an AE_* code.
+		int do_record();
+
+		// Frame pacing helper (see .cpp for the compensation logic).
+		void do_sleep(int64_t dur, int64_t pre, int64_t now);
+
+		// Capture-thread body.
+		void record_func();
+
+		// BGRA frame buffer of _buffer_size bytes, filled by do_record().
+		uint8_t *_buffer;
+		uint32_t _buffer_size;
+		uint32_t _width, _height;
+
+		std::atomic_bool _draw_cursor;
+
+		// Cached GDI handles and the last cursor snapshot.
+		HDC _hdc;
+		HBITMAP _bmp, _bmp_old;
+		CURSORINFO _ci;
+	};
+
+}
+
+#endif

+ 389 - 0
libs/Recorder/record_desktop_mag.cpp

@@ -0,0 +1,389 @@
+#include "record_desktop_mag.h"
+
+#include "system_lib.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+namespace am {
+
+namespace {
+// kMagnifierWindowClass has to be "Magnifier" according to the Magnification
+// API. The other strings can be anything.
+static wchar_t kMagnifierHostClass[] = L"ScreenCapturerWinMagnifierHost";
+static wchar_t kHostWindowName[] = L"MagnifierHost";
+static wchar_t kMagnifierWindowClass[] = L"Magnifier";
+static wchar_t kMagnifierWindowName[] = L"MagnifierWindow";
+
+// Process-wide TLS slot used to pass the capturing instance to the static
+// scaling callback. Allocated once on first use and intentionally never
+// freed (lives for the process lifetime).
+DWORD GetTlsIndex() {
+  static const DWORD tls_index = TlsAlloc();
+  return tls_index;
+}
+} // namespace
+
+// Static trampoline invoked by the Magnification API whenever the
+// magnifier control re-renders. The owning instance is smuggled through
+// TLS by do_mag_record() right before MagSetWindowSource triggers this.
+BOOL __stdcall record_desktop_mag::on_mag_scaling_callback(
+    HWND hwnd, void *srcdata, MAGIMAGEHEADER srcheader, void *destdata,
+    MAGIMAGEHEADER destheader, RECT unclipped, RECT clipped, HRGN dirty) {
+  record_desktop_mag *owner =
+      reinterpret_cast<record_desktop_mag *>(TlsGetValue(GetTlsIndex()));
+  // Clear the slot so a stale pointer is never reused by a later callback.
+  TlsSetValue(GetTlsIndex(), nullptr);
+  // Guard against an unexpected invocation with an empty slot instead of
+  // dereferencing a null pointer (previously crashed in that case).
+  if (owner)
+    owner->on_mag_data(srcdata, srcheader);
+  return TRUE;
+}
+
+// All members are initialized in-class (see the header).
+// NOTE(review): unlike record_desktop_gdi, the destructor does not call
+// stop()/clean_up(); callers must stop this capturer explicitly - confirm
+// that is intentional.
+record_desktop_mag::record_desktop_mag() {}
+
+record_desktop_mag::~record_desktop_mag() {}
+
+// Store the capture rect/fps and timing parameters. The Magnification API
+// itself is initialized lazily on the capture thread (do_mag_initialize),
+// because its window setup must happen on the thread that captures.
+int record_desktop_mag::init(const RECORD_DESKTOP_RECT &rect, const int fps) {
+  if (_inited == true) {
+    return AE_NO;
+  }
+
+  _fps = fps;
+  _rect = rect;
+  _width = rect.right - rect.left;
+  _height = rect.bottom - rect.top;
+
+  _start_time = av_gettime_relative();
+  _time_base = {1, AV_TIME_BASE};
+  _pixel_fmt = AV_PIX_FMT_BGRA;
+
+  _inited = true;
+
+  return AE_NO;
+}
+
+// Spawn the capture thread. Returns AE_NO if already running and
+// AE_NEED_INIT when init() has not been called yet.
+int record_desktop_mag::start() {
+  if (_running == true) {
+    // Fixed copy-pasted message (previously said "gdi").
+    al_warn("record desktop mag is already running");
+    return AE_NO;
+  }
+
+  if (_inited == false) {
+    return AE_NEED_INIT;
+  }
+
+  _running = true;
+  _thread = std::thread(std::bind(&record_desktop_mag::record_func, this));
+
+  return AE_NO;
+}
+
+// NOTE(review): record_func() in this file never consults _paused, so the
+// flag alone does not suspend frame delivery - confirm intended behavior.
+int record_desktop_mag::pause() {
+  _paused = true;
+  return AE_NO;
+}
+
+int record_desktop_mag::resume() {
+  _paused = false;
+  return AE_NO;
+}
+
+// Signal the capture loop to exit and wait for the worker thread.
+// Safe to call when the thread was never started.
+int record_desktop_mag::stop() {
+  _running = false;
+
+  if (_thread.joinable()) {
+    _thread.join();
+  }
+
+  return AE_NO;
+}
+
+// Tear down the magnifier windows, the Magnification API and the library
+// handle. All handles/state are reset afterwards so a second clean_up()
+// (or a later init()/clean_up() cycle) cannot double-free anything.
+void record_desktop_mag::clean_up() {
+  // DestroyWindow must be called before MagUninitialize. _magnifier_window is
+  // destroyed automatically when _host_window is destroyed.
+  if (_host_window)
+    DestroyWindow(_host_window);
+  if (_magnifier_initialized)
+    _mag_uninitialize_func();
+  if (_mag_lib_handle)
+    free_system_library(_mag_lib_handle);
+  if (_desktop_dc)
+    ReleaseDC(NULL, _desktop_dc);
+
+  _host_window = NULL;
+  _magnifier_window = NULL;
+  _magnifier_initialized = false;
+  _mag_lib_handle = NULL;
+  _desktop_dc = NULL;
+
+  _inited = false;
+}
+
+// Capture-thread entry point: initialize the Magnification API on this
+// thread, then repeatedly trigger a capture (which delivers frames through
+// on_mag_data) at the requested frame rate until stop() clears _running.
+void record_desktop_mag::record_func() {
+  int64_t pre_pts = 0;
+  // Target frame interval in microseconds.
+  int64_t dur = AV_TIME_BASE / _fps;
+
+  int ret = AE_NO;
+
+  // must call this in a new thread, otherwise SetWindowPos will stuck before
+  // capture
+  if (!do_mag_initialize()) {
+    al_info("Failed to initialize ScreenCapturerWinMagnifier.");
+    if (_on_error)
+      _on_error(AE_NEED_INIT);
+
+    return;
+  }
+
+  while (_running) {
+    ret = do_mag_record();
+    if (ret != AE_NO) {
+      if (_on_error)
+        _on_error(ret);
+      break;
+    }
+
+    // _current_pts was stamped by on_mag_data during do_mag_record().
+    do_sleep(dur, pre_pts, _current_pts);
+
+    pre_pts = _current_pts;
+  }
+}
+
+// Frame pacing (same scheme as record_desktop_gdi::do_sleep): sleep less
+// when the last frame ran long, longer when it ran short, keeping the
+// average rate at _fps. max() is the Windows macro from <windows.h>.
+void record_desktop_mag::do_sleep(int64_t dur, int64_t pre, int64_t now) {
+  int64_t delay = now - pre;
+  dur = delay > dur ? max(0, dur - (delay - dur)) : (dur + dur - delay);
+
+  // al_debug("%lld", delay);
+
+  if (dur)
+    av_usleep(dur);
+}
+
+// Load Magnification.dll, resolve its entry points, call MagInitialize and
+// build the hidden host window + magnifier control pair, then register the
+// scaling callback that delivers captured pixels. Returns false after
+// undoing partial setup on any failure. Must run on the capture thread
+// (see the comment in record_func).
+bool record_desktop_mag::do_mag_initialize() {
+#if 0 // we can handle crash
+      if (GetSystemMetrics(SM_CMONITORS) != 1) {
+    // Do not try to use the magnifier in multi-screen setup (where the API
+    // crashes sometimes).
+    al_info("Magnifier capturer cannot work on multi-screen system.");
+    return false;
+  }
+#endif
+
+  _desktop_dc = GetDC(nullptr);
+  _mag_lib_handle = load_system_library("Magnification.dll");
+  if (!_mag_lib_handle)
+    return false;
+  // Initialize Magnification API function pointers.
+  _mag_initialize_func = reinterpret_cast<MagInitializeFunc>(
+      GetProcAddress(_mag_lib_handle, "MagInitialize"));
+  _mag_uninitialize_func = reinterpret_cast<MagUninitializeFunc>(
+      GetProcAddress(_mag_lib_handle, "MagUninitialize"));
+  _mag_set_window_source_func = reinterpret_cast<MagSetWindowSourceFunc>(
+      GetProcAddress(_mag_lib_handle, "MagSetWindowSource"));
+  _mag_set_window_filter_list_func =
+      reinterpret_cast<MagSetWindowFilterListFunc>(
+          GetProcAddress(_mag_lib_handle, "MagSetWindowFilterList"));
+  _mag_set_image_scaling_callback_func =
+      reinterpret_cast<MagSetImageScalingCallbackFunc>(
+          GetProcAddress(_mag_lib_handle, "MagSetImageScalingCallback"));
+  if (!_mag_initialize_func || !_mag_uninitialize_func ||
+      !_mag_set_window_source_func || !_mag_set_window_filter_list_func ||
+      !_mag_set_image_scaling_callback_func) {
+    al_info(
+        "Failed to initialize ScreenCapturerWinMagnifier: library functions "
+        "missing.");
+    return false;
+  }
+
+  BOOL result = _mag_initialize_func();
+  if (!result) {
+    al_info("Failed to initialize ScreenCapturerWinMagnifier: error from "
+            "MagInitialize %ld",
+            GetLastError());
+    return false;
+  }
+  // Find the module that owns DefWindowProc to use as the window-class
+  // owner below (this works from a DLL as well as an EXE).
+  HMODULE hInstance = nullptr;
+  result =
+      GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+                             GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+                         reinterpret_cast<char *>(&DefWindowProc), &hInstance);
+  if (!result) {
+    _mag_uninitialize_func();
+    al_info("Failed to initialize ScreenCapturerWinMagnifier: "
+            "error from GetModulehandleExA %ld",
+            GetLastError());
+    return false;
+  }
+  // Register the host window class. See the MSDN documentation of the
+  // Magnification API for more infomation.
+  WNDCLASSEXW wcex = {};
+  wcex.cbSize = sizeof(WNDCLASSEX);
+  wcex.lpfnWndProc = &DefWindowProc;
+  wcex.hInstance = hInstance;
+  wcex.hCursor = LoadCursor(nullptr, IDC_ARROW);
+  wcex.lpszClassName = kMagnifierHostClass;
+  // Ignore the error which may happen when the class is already registered.
+  RegisterClassExW(&wcex);
+  // Create the host window.
+  _host_window =
+      CreateWindowExW(WS_EX_LAYERED, kMagnifierHostClass, kHostWindowName, 0, 0,
+                      0, 0, 0, nullptr, nullptr, hInstance, nullptr);
+  if (!_host_window) {
+    _mag_uninitialize_func();
+    al_info("Failed to initialize ScreenCapturerWinMagnifier: "
+            "error from creating host window %ld",
+            GetLastError());
+    return false;
+  }
+  // Create the magnifier control.
+  _magnifier_window = CreateWindowW(kMagnifierWindowClass, kMagnifierWindowName,
+                                    WS_CHILD | WS_VISIBLE, 0, 0, 0, 0,
+                                    _host_window, nullptr, hInstance, nullptr);
+  if (!_magnifier_window) {
+    _mag_uninitialize_func();
+    al_info("Failed to initialize ScreenCapturerWinMagnifier: "
+            "error from creating magnifier window %ld",
+            GetLastError());
+    return false;
+  }
+  // Hide the host window.
+  ShowWindow(_host_window, SW_HIDE);
+  // Set the scaling callback to receive captured image.
+  result = _mag_set_image_scaling_callback_func(
+      _magnifier_window, &record_desktop_mag::on_mag_scaling_callback);
+  if (!result) {
+    _mag_uninitialize_func();
+    al_info("Failed to initialize ScreenCapturerWinMagnifier: "
+            "error from MagSetImageScalingCallback %ld",
+            GetLastError());
+    return false;
+  }
+  if (_excluded_window) {
+    result = _mag_set_window_filter_list_func(
+        _magnifier_window, MW_FILTERMODE_EXCLUDE, 1, &_excluded_window);
+    if (!result) {
+      _mag_uninitialize_func();
+      al_warn("Failed to initialize ScreenCapturerWinMagnifier: "
+              "error from MagSetWindowFilterList %ld",
+              GetLastError());
+      return false;
+    }
+  }
+  _magnifier_initialized = true;
+  return true;
+}
+
+// Trigger one capture: size the magnifier control over _rect, stash `this`
+// in TLS and call MagSetWindowSource, which synchronously fires
+// on_mag_scaling_callback -> on_mag_data with the pixels. Returns AE_NO on
+// success, AE_NEED_INIT/AE_ERROR on failure.
+int record_desktop_mag::do_mag_record() {
+  if (!_magnifier_initialized) {
+    al_error("Magnifier initialization failed.");
+    return AE_NEED_INIT;
+  }
+
+  auto capture_image = [&](const RECORD_DESKTOP_RECT &rect) {
+    // Set the magnifier control to cover the captured rect. The content of the
+    // magnifier control will be the captured image.
+
+    BOOL result =
+        SetWindowPos(_magnifier_window, NULL, rect.left, rect.top,
+                     rect.right - rect.left, rect.bottom - rect.top, 0);
+    if (!result) {
+      al_error("Failed to call SetWindowPos: %ld. Rect = {%d, %d, %d, %d}",
+               GetLastError(), rect.left, rect.top, rect.right, rect.bottom);
+      return false;
+    }
+
+    _magnifier_capture_succeeded = false;
+    RECT native_rect = {rect.left, rect.top, rect.right, rect.bottom};
+    TlsSetValue(GetTlsIndex(), this);
+
+    // on_mag_data will be called via on_mag_scaling_callback and fill in the
+    // frame before _mag_set_window_source_func returns.
+    DWORD exception = 0;
+    // SEH wrapper: the Magnification API crashes on some systems.
+    result =
+        seh_mag_set_window_source(_magnifier_window, native_rect, exception);
+    if (!result) {
+      al_error("Failed to call MagSetWindowSource: %ld Exception: %ld. Rect = {%d, %d, %d, %d}",
+               GetLastError(), exception, rect.left, rect.top, rect.right,
+               rect.bottom);
+      return false;
+    }
+    return _magnifier_capture_succeeded;
+  };
+
+#if 0
+  // Switch to the desktop receiving user input if different from the current
+  // one.
+  std::unique_ptr<Desktop> input_desktop(Desktop::GetInputDesktop());
+  if (input_desktop.get() != NULL && !desktop_.IsSame(*input_desktop)) {
+    // Release GDI resources otherwise SetThreadDesktop will fail.
+    if (_desktop_dc) {
+      ReleaseDC(NULL, _desktop_dc);
+      _desktop_dc = NULL;
+    }
+    // If SetThreadDesktop() fails, the thread is still assigned a desktop.
+    // So we can continue capture screen bits, just from the wrong desktop.
+    desktop_.SetThreadDesktop(input_desktop.release());
+  }
+
+  DesktopRect rect = GetScreenRect(current_screen_id_, current_device_key_);
+#endif
+
+  // capture_image may fail in some situations, e.g. windows8 metro mode. So
+  // defer to the fallback capturer if magnifier capturer did not work.
+  if (!capture_image(_rect)) {
+    al_error("Magnifier capturer failed to capture a frame.");
+    return AE_ERROR;
+  }
+
+  return AE_NO;
+}
+
+// Exclude a window (e.g. the recorder's own UI) from the captured image.
+// If the magnifier is already set up the filter is applied immediately;
+// otherwise do_mag_initialize applies it later.
+void record_desktop_mag::set_exclude(HWND excluded_window) {
+  _excluded_window = excluded_window;
+  if (_excluded_window && _magnifier_initialized) {
+    _mag_set_window_filter_list_func(_magnifier_window, MW_FILTERMODE_EXCLUDE,
+                                     1, &_excluded_window);
+  }
+}
+
+// Called (synchronously, from inside MagSetWindowSource) with the captured
+// pixels. Validates the buffer layout against the expected tightly-packed
+// 32bpp frame, wraps it in a stack-lifetime AVFrame (no pixel copy) and
+// forwards it to _on_data.
+void record_desktop_mag::on_mag_data(void *data, const MAGIMAGEHEADER &header) {
+  const int kBytesPerPixel = 4;
+
+  int captured_bytes_per_pixel = header.cbSize / header.width / header.height;
+  if (header.format != GUID_WICPixelFormat32bppRGBA ||
+      header.width != static_cast<UINT>(_width) ||
+      header.height != static_cast<UINT>(_height) ||
+      header.stride != static_cast<UINT>(kBytesPerPixel * _width) ||
+      captured_bytes_per_pixel != kBytesPerPixel) {
+    // Reject mismatched buffers; _magnifier_capture_succeeded stays false
+    // so do_mag_record reports the failure.
+    al_warn("Output format does not match the captured format: width = %d, "
+            "height = %d, stride= %d, bpp = %d, pixel format RGBA ? %d.",
+            header.width, header.height, header.stride,
+            captured_bytes_per_pixel,
+            (header.format == GUID_WICPixelFormat32bppRGBA));
+    return;
+  }
+
+  _current_pts = av_gettime_relative();
+
+  AVFrame *frame = av_frame_alloc();
+  frame->pts = _current_pts;
+  frame->pkt_dts = frame->pts;
+
+  frame->width = _width;
+  frame->height = _height;
+  frame->format = AV_PIX_FMT_BGRA;
+  frame->pict_type = AV_PICTURE_TYPE_I;
+  frame->pkt_size = _width * _height * 4;
+
+  // Points frame->data at the callback's buffer; the _on_data consumer
+  // must copy before this function returns (frame is freed below).
+  av_image_fill_arrays(frame->data, frame->linesize,
+                       reinterpret_cast<uint8_t *>(data), AV_PIX_FMT_BGRA,
+                       _width, _height, 1);
+
+  if (_on_data)
+    _on_data(frame);
+
+  av_frame_free(&frame);
+
+  _magnifier_capture_succeeded = true;
+}
+
+// SEH wrapper around MagSetWindowSource: the Magnification API is known to
+// crash on some systems (see the comments in do_mag_initialize), so any
+// structured exception is swallowed, its code is returned through
+// `exception`, and false is reported to the caller.
+bool record_desktop_mag::seh_mag_set_window_source(HWND hwnd, RECT rect,
+                                                   DWORD &exception) {
+  if (!_mag_set_window_source_func)
+    return false;
+
+  __try {
+    return _mag_set_window_source_func(hwnd, rect);
+  } __except (EXCEPTION_EXECUTE_HANDLER) {
+    exception = ::GetExceptionCode();
+    return false;
+  }
+  // (unreachable trailing `return false` removed - both paths return)
+}
+
+} // namespace am

+ 93 - 0
libs/Recorder/record_desktop_mag.h

@@ -0,0 +1,93 @@
+#pragma once
+
+// documents
+// https://learn.microsoft.com/en-us/previous-versions/windows/desktop/magapi/magapi-intro
+// https://learn.microsoft.com/en-us/previous-versions/windows/desktop/magapi/magapi-whats-new
+// https://github.com/microsoft/Windows-classic-samples/blob/main/Samples/Magnification/cpp/Windowed/MagnifierSample.cpp
+
+#include <magnification.h>
+
+#include "record_desktop.h"
+
+namespace am {
+// Desktop capturer based on the Windows Magnification API: a hidden
+// magnifier control re-renders the screen region and delivers the pixels
+// through an image-scaling callback (see record_desktop_mag.cpp).
+// The DLL is loaded dynamically, so these typedefs mirror its exports.
+class record_desktop_mag : public record_desktop {
+  typedef BOOL(WINAPI *MagImageScalingCallback)(
+      HWND hwnd, void *srcdata, MAGIMAGEHEADER srcheader, void *destdata,
+      MAGIMAGEHEADER destheader, RECT unclipped, RECT clipped, HRGN dirty);
+  typedef BOOL(WINAPI *MagInitializeFunc)(void);
+  typedef BOOL(WINAPI *MagUninitializeFunc)(void);
+  typedef BOOL(WINAPI *MagSetWindowSourceFunc)(HWND hwnd, RECT rect);
+  typedef BOOL(WINAPI *MagSetWindowFilterListFunc)(HWND hwnd,
+                                                   DWORD dwFilterMode,
+                                                   int count, HWND *pHWND);
+  typedef BOOL(WINAPI *MagSetImageScalingCallbackFunc)(
+      HWND hwnd, MagImageScalingCallback callback);
+
+  // Static trampoline registered with the API; retrieves the instance
+  // from TLS (see .cpp).
+  static BOOL WINAPI on_mag_scaling_callback(
+      HWND hwnd, void *srcdata, MAGIMAGEHEADER srcheader, void *destdata,
+      MAGIMAGEHEADER destheader, RECT unclipped, RECT clipped, HRGN dirty);
+
+public:
+  record_desktop_mag();
+  ~record_desktop_mag() override;
+
+  int init(const RECORD_DESKTOP_RECT &rect, const int fps) override;
+
+  int start() override;
+  int pause() override;
+  int resume() override;
+  int stop() override;
+
+  void set_exclude(HWND excluded_window);
+
+protected:
+  void clean_up() override;
+
+private:
+  void record_func();
+
+  void do_sleep(int64_t dur, int64_t pre, int64_t now);
+
+  bool do_mag_initialize();
+
+  int do_mag_record();
+
+  void on_mag_data(void *data, const MAGIMAGEHEADER &header);
+
+  bool seh_mag_set_window_source(HWND hwnd, RECT rect, DWORD &exception);
+
+private:
+  uint32_t _width = 0;
+  uint32_t _height = 0;
+  int64_t _current_pts = -1;
+
+  // Used to exclude window with specified window id.
+  HWND _excluded_window = NULL;
+
+  // Used for getting the screen dpi.
+  HDC _desktop_dc = NULL;
+
+  // Module handler
+  HMODULE _mag_lib_handle = NULL;
+
+  // Mag functions
+  MagInitializeFunc _mag_initialize_func = nullptr;
+  MagUninitializeFunc _mag_uninitialize_func = nullptr;
+  MagSetWindowSourceFunc _mag_set_window_source_func = nullptr;
+  MagSetWindowFilterListFunc _mag_set_window_filter_list_func = nullptr;
+  MagSetImageScalingCallbackFunc _mag_set_image_scaling_callback_func = nullptr;
+
+  // The hidden window hosting the magnifier control.
+  HWND _host_window = NULL;
+
+  // The magnifier control that captures the screen.
+  HWND _magnifier_window = NULL;
+
+  // True if the magnifier control has been successfully initialized.
+  bool _magnifier_initialized = false;
+
+  // True if the last OnMagImageScalingCallback was called and handled
+  // successfully. Reset at the beginning of each CaptureImage call.
+  bool _magnifier_capture_succeeded = true;
+};
+} // namespace am

+ 154 - 0
libs/Recorder/record_desktop_wgc.cpp

@@ -0,0 +1,154 @@
+#include "record_desktop_wgc.h"
+
+#include "utils_string.h"
+
+#include "system_error.h"
+#include "error_define.h"
+#include "log_helper.h"
+
+// EnumDisplayMonitors callback: stores the primary monitor's handle into
+// the HMONITOR pointed to by `data`. Always returns TRUE to keep
+// enumerating.
+BOOL WINAPI EnumMonitorProc(HMONITOR hmonitor, HDC hdc, LPRECT lprc,
+                            LPARAM data) {
+
+  MONITORINFOEX info_ex;
+  info_ex.cbSize = sizeof(MONITORINFOEX);
+
+  GetMonitorInfo(hmonitor, &info_ex);
+
+  // NOTE(review): dwFlags here is a MONITORINFO flag set (only
+  // MONITORINFOF_PRIMARY is defined for it), while
+  // DISPLAY_DEVICE_MIRRORING_DRIVER belongs to DISPLAY_DEVICE.StateFlags,
+  // so this comparison can never be true - confirm the intended check.
+  if (info_ex.dwFlags == DISPLAY_DEVICE_MIRRORING_DRIVER)
+    return true;
+
+  if (info_ex.dwFlags & MONITORINFOF_PRIMARY) {
+    *(HMONITOR *)data = hmonitor;
+  }
+
+  return true;
+}
+
+// Returns the primary monitor's handle, or nullptr if the enumeration
+// did not find one.
+HMONITOR GetPrimaryMonitor() {
+  HMONITOR primary = nullptr;
+
+  ::EnumDisplayMonitors(nullptr, nullptr, EnumMonitorProc,
+                        reinterpret_cast<LPARAM>(&primary));
+
+  return primary;
+}
+
+namespace am {
+
+
+// Members are initialized in-class (see the header).
+record_desktop_wgc::record_desktop_wgc() {}
+
+// Stop the session and release it before destruction.
+record_desktop_wgc::~record_desktop_wgc() {
+  stop();
+  clean_up();
+}
+
+// Load the WGC module, create a capture session for the primary monitor
+// and register ourselves as its frame observer. Returns AE_NO on success;
+// on failure returns the error code and leaves the recorder uninitialized.
+int record_desktop_wgc::init(const RECORD_DESKTOP_RECT &rect, const int fps) {
+  int error = AE_NO;
+  if (_inited == true)
+    return error;
+
+  _fps = fps;
+  _rect = rect;
+  _start_time = av_gettime_relative();
+  _time_base = {1, AV_TIME_BASE};
+  _pixel_fmt = AV_PIX_FMT_BGRA;
+
+  do {
+    if (!module_.is_supported()) {
+      error = AE_UNSUPPORT;
+      break;
+    }
+
+    session_ = module_.create_session();
+    if (!session_) {
+      error = AE_WGC_CREATE_CAPTURER_FAILED;
+      break;
+    }
+
+    session_->register_observer(this);
+
+    error = session_->initialize(GetPrimaryMonitor());
+    if (error != AE_NO)
+      break; // previously _inited was set to true even on this failure
+
+    _inited = true;
+  } while (0);
+
+  if (error != AE_NO) {
+    al_debug("%s,last error:%s", err2str(error),
+             system_error::error2str(GetLastError()).c_str());
+  }
+
+  return error;
+}
+
+// Start the WGC session; frames then arrive through on_frame(). Returns
+// AE_NO if already running and AE_NEED_INIT when init() has not succeeded.
+int record_desktop_wgc::start() {
+  if (_running == true) {
+    // Fixed copy-pasted message (previously said "duplication").
+    al_warn("record desktop wgc is already running");
+    return AE_NO;
+  }
+
+  if (_inited == false) {
+    return AE_NEED_INIT;
+  }
+
+  _running = true;
+  session_->start();
+
+  return AE_NO;
+}
+
+// Pause/resume simply forward to the WGC session (which owns the capture
+// loop), in addition to tracking the flag for the base class.
+int record_desktop_wgc::pause() {
+  _paused = true;
+  if (session_)
+    session_->pause();
+  return AE_NO;
+}
+
+int record_desktop_wgc::resume() {
+  _paused = false;
+  if (session_)
+    session_->resume();
+  return AE_NO;
+}
+
+// Stop capturing. The session keeps existing (released in clean_up());
+// stop() only halts frame delivery.
+int record_desktop_wgc::stop() {
+  _running = false;
+
+  if (session_ != nullptr) {
+    session_->stop();
+  }
+
+  return AE_NO;
+}
+
+// wgc_session observer callback: wrap the session's BGRA pixels in a
+// stack-lifetime AVFrame (no pixel copy) and forward it to _on_data.
+// The consumer must copy the data before this function returns.
+void record_desktop_wgc::on_frame(const wgc_session::wgc_session_frame &frame) {
+  al_debug("wgc on frame");
+  AVFrame *av_frame = av_frame_alloc();
+
+  av_frame->pts = av_gettime_relative();
+  av_frame->pkt_dts = av_frame->pts;
+  // NOTE(review): pkt_pts is deprecated/removed in newer FFmpeg, and the
+  // other capturers in this library do not set it - confirm it is needed.
+  av_frame->pkt_pts = av_frame->pts;
+
+  av_frame->width = frame.width;
+  av_frame->height = frame.height;
+  av_frame->format = AV_PIX_FMT_BGRA;
+  av_frame->pict_type = AV_PICTURE_TYPE_NONE;
+  av_frame->pkt_size = frame.width * frame.height * 4;
+
+  av_image_fill_arrays(av_frame->data, av_frame->linesize, frame.data,
+                       AV_PIX_FMT_BGRA, frame.width, frame.height, 1);
+
+  if (_on_data)
+    _on_data(av_frame);
+
+  av_frame_free(&av_frame);
+}
+
+// Release the WGC session and drop our pointer so it is never reused.
+void record_desktop_wgc::clean_up() {
+  _inited = false;
+
+  if (session_ != nullptr) {
+    session_->release();
+    session_ = nullptr;
+  }
+}
+
+} // namespace am

+ 67 - 0
libs/Recorder/record_desktop_wgc.h

@@ -0,0 +1,67 @@
+#pragma once
+
+#include "../WGC/export.h"
+#include "record_desktop.h"
+
+#include <Windows.h>
+
+namespace am {
+class record_desktop_wgc : public record_desktop,
+                           public wgc_session::wgc_session_observer {
+  class wgc_session_module {
+    using func_type_is_supported = bool (*)();
+    using func_type_create_session = wgc_session *(*)();
+
+  public:
+    wgc_session_module() { 
+      module_ = ::LoadLibraryA("WGC.dll");
+      if (module_) {
+        func_is_supported_ = (func_type_is_supported)::GetProcAddress(
+            module_, "wgc_is_supported");
+        func_create_session_ = (func_type_create_session)::GetProcAddress(
+            module_, "wgc_create_session");
+      }
+    }
+    ~wgc_session_module() {
+      if (module_)
+        ::FreeModule(module_);
+    }
+
+    bool is_supported() const {
+      return func_create_session_ && func_is_supported_();
+    }
+
+    wgc_session *create_session() const { 
+      if (!func_create_session_)
+        return nullptr;
+
+      return func_create_session_(); 
+    }
+
+  private:
+    HMODULE module_ = nullptr;
+    func_type_is_supported func_is_supported_ = nullptr;
+    func_type_create_session func_create_session_ = nullptr;
+  };
+
+public:
+  record_desktop_wgc();
+  ~record_desktop_wgc();
+
+  int init(const RECORD_DESKTOP_RECT &rect, const int fps) override;
+
+  int start() override;
+  int pause() override;
+  int resume() override;
+  int stop() override;
+
+  void on_frame(const wgc_session::wgc_session_frame &frame) override;
+
+protected:
+  void clean_up() override;
+
+private:
+  wgc_session *session_ = nullptr;
+  wgc_session_module module_;
+};
+} // namespace am

+ 294 - 0
libs/Recorder/remuxer_ffmpeg.cpp

@@ -0,0 +1,294 @@
+#include "remuxer_ffmpeg.h"
+
+#include <mutex>
+
+#include "headers_ffmpeg.h"
+#include "utils_string.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+namespace am {
+	// Process-wide singleton bookkeeping, shared by the class methods below.
+	static remuxer_ffmpeg *_g_instance = nullptr;
+	static std::mutex _g_mutex;
+
+	// Rescales a packet's timing fields from the input stream's time base
+	// to the output stream's, so the remuxed file keeps identical timing.
+	static void process_packet(AVPacket *pkt, AVStream *in_stream,
+		AVStream *out_stream)
+	{
+		pkt->pts = av_rescale_q_rnd(pkt->pts, in_stream->time_base,
+			out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
+		pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base,
+			out_stream->time_base,
+			(AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
+		pkt->duration = (int)av_rescale_q(pkt->duration, in_stream->time_base,
+			out_stream->time_base);
+		// Let the muxer recompute the byte position for the new container.
+		pkt->pos = -1;
+	}
+
+	static int remux_file(AVFormatContext *ctx_src, AVFormatContext *ctx_dst, REMUXER_PARAM *param)
+	{
+		AVPacket pkt;
+
+		int ret, throttle = 0, error = AE_NO;
+
+		for (;;) {
+			ret = av_read_frame(ctx_src, &pkt);
+			if (ret < 0) {
+				if (ret != AVERROR_EOF)
+					error = AE_FFMPEG_READ_FRAME_FAILED;
+				break;
+			}
+
+			if (param->cb_progress != NULL && throttle++ > 10) {
+				float progress = pkt.pos / (float)param->src_size * 100.f;
+				param->cb_progress(param->src, progress, 100);
+				throttle = 0;
+			}
+
+			process_packet(&pkt, ctx_src->streams[pkt.stream_index],
+				ctx_dst->streams[pkt.stream_index]);
+
+			ret = av_interleaved_write_frame(ctx_dst, &pkt);
+			av_packet_unref(&pkt);
+
+			// Sometimes the pts and dts will equal to last packet,
+			// don not know why,may the time base issue?
+			// So return -22 do not care for now
+			if (ret < 0 && ret != -22) {
+				error = AE_FFMPEG_WRITE_FRAME_FAILED;
+				break;
+			}
+		}
+
+		return error;
+	}
+
+	// Opens `path` for reading and probes its streams; dumps the layout in
+	// debug builds. On success *ctx owns an opened AVFormatContext.
+	static int open_src(AVFormatContext **ctx, const char *path) {
+		if (avformat_open_input(ctx, path, NULL, NULL) < 0)
+			return AE_FFMPEG_OPEN_INPUT_FAILED;
+
+		if (avformat_find_stream_info(*ctx, NULL) < 0)
+			return AE_FFMPEG_FIND_STREAM_FAILED;
+
+#ifdef _DEBUG
+		av_dump_format(*ctx, 0, path, false);
+#endif
+		return AE_NO;
+	}
+
+	// Creates the output muxer for `path`, mirroring every stream of
+	// ctx_src (codec parameters, metadata, time base), and opens the
+	// output file unless the muxer does its own I/O. Returns an AE_* code;
+	// the caller is responsible for freeing *ctx_dst on failure.
+	int open_dst(AVFormatContext **ctx_dst, const char *path, AVFormatContext *ctx_src) {
+		int ret;
+
+		// Output format is guessed from the file extension of `path`.
+		avformat_alloc_output_context2(ctx_dst, NULL, NULL,
+			path);
+		if (!*ctx_dst) {
+			return AE_FFMPEG_ALLOC_CONTEXT_FAILED;
+		}
+
+		for (unsigned i = 0; i < ctx_src->nb_streams; i++) {
+			AVStream *in_stream = ctx_src->streams[i];
+			AVStream *out_stream = avformat_new_stream(
+				*ctx_dst, in_stream->codec->codec);
+			if (!out_stream) {
+				return AE_FFMPEG_NEW_STREAM_FAILED;
+			}
+
+			// Newer ffmpeg copies codec settings through AVCodecParameters;
+			// older releases use the (now deprecated) avcodec_copy_context.
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 48, 101)
+			AVCodecParameters *par = avcodec_parameters_alloc();
+			ret = avcodec_parameters_from_context(par, in_stream->codec);
+			if (ret == 0)
+				ret = avcodec_parameters_to_context(out_stream->codec,
+					par);
+			avcodec_parameters_free(&par);
+#else
+			ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
+#endif
+
+			if (ret < 0) {
+				return AE_FFMPEG_COPY_PARAMS_FAILED;
+			}
+			out_stream->time_base = out_stream->codec->time_base;
+
+			av_dict_copy(&out_stream->metadata, in_stream->metadata, 0);
+
+			// Zero tag lets the new container pick its own codec tag.
+			out_stream->codec->codec_tag = 0;
+			if ((*ctx_dst)->oformat->flags & AVFMT_GLOBALHEADER)
+				out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+		}
+
+#ifndef _NDEBUG
+		av_dump_format(*ctx_dst, 0, path, true);
+#endif
+
+		// Formats without AVFMT_NOFILE need an explicit AVIO open.
+		if (!((*ctx_dst)->oformat->flags & AVFMT_NOFILE)) {
+			ret = avio_open(&(*ctx_dst)->pb, path,
+				AVIO_FLAG_WRITE);
+			if (ret < 0) {
+				return AE_FFMPEG_OPEN_IO_FAILED;
+			}
+		}
+
+		return AE_NO;
+	}
+
+	// Worker-thread entry point: remuxes param->src into param->dst,
+	// reporting state via cb_state (1 = started, 0 = finished) and progress
+	// via cb_progress, then unregisters itself from the singleton's map.
+	static void remuxing(REMUXER_PARAM *param) {
+		al_debug("remuxing:%s", param->src);
+
+		int error = AE_NO;
+
+		AVFormatContext *ctx_src = nullptr, *ctx_dst = nullptr;
+
+		//call back start
+		if (param->cb_state)
+			param->cb_state(param->src, 1, AE_NO);
+
+		do {
+			// Paths are stored as ANSI; ffmpeg expects UTF-8.
+			error = open_src(&ctx_src, utils_string::ascii_utf8(param->src).c_str());
+			if (error != AE_NO) {
+				break;
+			}
+
+			error = open_dst(&ctx_dst, utils_string::ascii_utf8(param->dst).c_str(), ctx_src);
+			if (error != AE_NO) {
+				break;
+			}
+
+			int ret = avformat_write_header(ctx_dst, NULL);
+			if (ret < 0) {
+				error = AE_FFMPEG_WRITE_HEADER_FAILED;
+				break;
+			}
+
+			error = remux_file(ctx_src, ctx_dst, param);
+			if (error != AE_NO) {
+				// Finalize the container anyway so partial output is readable.
+				av_write_trailer(ctx_dst);
+				break;
+			}
+
+			ret = av_write_trailer(ctx_dst);
+			if (ret < 0)
+				error = AE_FFMPEG_WRITE_TRAILER_FAILED;
+
+
+		} while (0);
+
+
+		if (ctx_src) {
+			avformat_close_input(&ctx_src);
+		}
+
+		// Only close the AVIOContext that open_dst() opened itself.
+		if (ctx_dst && !(ctx_dst->oformat->flags & AVFMT_NOFILE))
+			avio_close(ctx_dst->pb);
+
+		if (ctx_dst)
+			avformat_free_context(ctx_dst);
+
+		al_debug("remux %s to %s end with error:%s",
+			param->src, param->dst, err2str(error));
+
+		//call back end
+		if (param->cb_state)
+			param->cb_state(param->src, 0, error);
+
+		// Drop this job's handle; note this runs on the worker thread itself.
+		remuxer_ffmpeg::instance()->remove_remux(param->src);
+	}
+
+	// Lazily constructs the process-wide singleton under the global lock.
+	remuxer_ffmpeg * remuxer_ffmpeg::instance()
+	{
+		std::lock_guard<std::mutex> guard(_g_mutex);
+
+		if (!_g_instance)
+			_g_instance = new remuxer_ffmpeg();
+
+		return _g_instance;
+	}
+
+	// Destroys the singleton. The instance is detached from the global
+	// pointer under the lock but deleted OUTSIDE it: the destructor runs
+	// destroy_remux(), which takes _g_mutex itself, so deleting while
+	// holding the lock (as the original did) self-deadlocks.
+	void remuxer_ffmpeg::release()
+	{
+		remuxer_ffmpeg *doomed = nullptr;
+
+		{
+			std::lock_guard<std::mutex> lock(_g_mutex);
+			doomed = _g_instance;
+			_g_instance = nullptr;
+		}
+
+		delete doomed;
+	}
+
+
+
+	// Starts an asynchronous remux of param.src into param.dst. Returns
+	// AE_NO on success, AE_REMUX_RUNNING if a job for the same source is
+	// still active, AE_REMUX_INVALID_INOUT for empty/identical paths, and
+	// AE_REMUX_NOT_EXIST for a missing/empty source file. The finished job
+	// removes itself from _handlers via remove_remux().
+	int remuxer_ffmpeg::create_remux(const REMUXER_PARAM & param)
+	{
+		std::lock_guard<std::mutex> lock(_g_mutex);
+
+		auto itr = _handlers.find(param.src);
+		if (itr != _handlers.end() && itr->second->fn.joinable() == true) {
+			return AE_REMUX_RUNNING;
+		}
+
+
+		if (!strlen(param.src) || !strlen(param.dst) || !strcmp(param.src, param.dst))
+			return AE_REMUX_INVALID_INOUT;
+
+		// 64-bit stat so files over 2 GiB report their real size.
+#ifdef _MSC_VER
+		struct _stat64 st = { 0 };
+		_stat64(param.src, &st);
+#else
+		struct stat st = { 0 };
+		stat(param.src, &st);
+#endif
+
+		// Zero size also covers stat() failure (st stays zero-initialized).
+		if (!st.st_size) return AE_REMUX_NOT_EXIST;
+
+
+		// Reap a finished (non-joinable) handle for the same source.
+		if (itr != _handlers.end()) {
+			delete itr->second;
+
+			_handlers.erase(itr);
+		}
+
+		REMUXER_HANDLE *handle = new REMUXER_HANDLE;
+
+		// NOTE(review): REMUXER_PARAM contains a std::atomic_bool, so a raw
+		// memcpy over it is formally undefined — confirm, or copy field by
+		// field.
+		memcpy(&(handle->param), &param, sizeof(REMUXER_PARAM));
+
+		handle->param.running = true;
+		handle->param.src_size = st.st_size;
+		// The thread borrows handle->param, which stays alive in _handlers
+		// until remove_remux()/destroy_remux() deletes the handle.
+		handle->fn = std::thread(remuxing, &handle->param);
+
+		_handlers[param.src] = handle;
+
+		return AE_NO;
+	}
+
+	// Unregisters a finished job. Called by the worker thread itself at the
+	// end of remuxing(), which is why the thread is detached before its
+	// handle (owner of the std::thread object) is deleted.
+	void remuxer_ffmpeg::remove_remux(std::string src)
+	{
+		std::lock_guard<std::mutex> lock(_g_mutex);
+
+		auto itr = _handlers.find(src);
+		if (itr != _handlers.end()) {
+			itr->second->fn.detach();
+
+			delete itr->second;
+			_handlers.erase(itr);
+		}
+	}
+
+	// Cancels and joins every outstanding job. The handles are moved out of
+	// the map before joining: the worker's final remove_remux() takes
+	// _g_mutex, so joining while holding the lock (as the original did)
+	// deadlocks. The original also erased map entries inside the loop and
+	// then incremented the (invalidated) iterator — undefined behavior.
+	void remuxer_ffmpeg::destroy_remux()
+	{
+		std::map<std::string, REMUXER_HANDLE*> handlers;
+
+		{
+			std::lock_guard<std::mutex> lock(_g_mutex);
+			handlers.swap(_handlers);
+		}
+
+		for (auto &entry : handlers)
+		{
+			// Ask the worker to stop at the next packet boundary.
+			entry.second->param.running = false;
+
+			if (entry.second->fn.joinable())
+				entry.second->fn.join();
+
+			delete entry.second;
+		}
+	}
+
+}

+ 56 - 0
libs/Recorder/remuxer_ffmpeg.h

@@ -0,0 +1,56 @@
+#ifndef REMUXER_FFMPEG
+#define REMUXER_FFMPEG
+
+#include <map>
+#include <atomic>
+#include <functional>
+#include <thread>
+#include <string>
+
+namespace am {
+	// Progress callback: (source path, percent done, total).
+	typedef void(*cb_remux_progress)(const char *, int, int);
+	// State callback: (source path, 1 = started / 0 = finished, error code).
+	typedef void(*cb_remux_state)(const char *, int, int);
+
+	// Per-job description, shared with the worker thread.
+	typedef struct _REMUXER_PARAM {
+		char src[260];	// source path (ANSI)
+		char dst[260];	// destination path (ANSI)
+		int64_t src_size;	// source size in bytes, drives progress percent
+		std::atomic_bool running;	// cleared to request cancellation
+		cb_remux_progress cb_progress;
+		cb_remux_state cb_state;
+	}REMUXER_PARAM;
+
+	// NOTE(review): appears unused in this header and its .cpp — confirm.
+	typedef std::function<void(REMUXER_PARAM*)> thread_remuxing;
+
+	// A running/finished job: its parameters plus the worker thread.
+	typedef struct _REMUXER_HANDLE {
+		REMUXER_PARAM param;
+		std::thread fn;
+	}REMUXER_HANDLE;
+	
+
+	// Singleton that runs file remux jobs on background threads.
+	class remuxer_ffmpeg
+	{
+	private:
+		remuxer_ffmpeg(){}
+		
+		~remuxer_ffmpeg() { destroy_remux(); }
+
+	public:
+		// Process-wide accessor / teardown.
+		static remuxer_ffmpeg *instance();
+		static void release();
+
+		// Starts an async remux job; see the .cpp for the error codes.
+		int create_remux(const REMUXER_PARAM & param);
+
+		// Drops the handle of a finished job (called by the worker itself).
+		void remove_remux(std::string src);
+
+		// Cancels and joins every outstanding job.
+		void destroy_remux();
+
+	private:
+		std::map<std::string, REMUXER_HANDLE*> _handlers;
+	};
+
+}
+
+
+
+#endif // !REMUXER_FFMPEG

+ 96 - 0
libs/Recorder/resample_pcm.cpp

@@ -0,0 +1,96 @@
+#include "resample_pcm.h"
+
+#include "log_helper.h"
+#include "error_define.h"
+
+namespace am {
+	// Start from an empty state; real setup happens in init().
+	resample_pcm::resample_pcm()
+		: _ctx(NULL)
+		, _sample_src(NULL)
+		, _sample_dst(NULL)
+	{
+	}
+
+	// Release the converter and the copied settings.
+	resample_pcm::~resample_pcm() { cleanup(); }
+
+	// Configures libswresample for src -> dst conversion and reports the
+	// byte size of one converted destination frame through
+	// *resapmled_frame_size [sic]. Returns an AE_* code; on failure any
+	// partially built state is released via cleanup().
+	int resample_pcm::init(const SAMPLE_SETTING * sample_src, const SAMPLE_SETTING * sample_dst, int * resapmled_frame_size)
+	{
+		int err = AE_NO;
+
+		do {
+			// Keep private copies of both settings for later convert() calls.
+			// NOTE(review): malloc results are not checked — confirm that is
+			// acceptable for this codebase.
+			_sample_src = (SAMPLE_SETTING*)malloc(sizeof(SAMPLE_SETTING));
+			_sample_dst = (SAMPLE_SETTING*)malloc(sizeof(SAMPLE_SETTING));
+
+			memcpy(_sample_src, sample_src, sizeof(SAMPLE_SETTING));
+			memcpy(_sample_dst, sample_dst, sizeof(SAMPLE_SETTING));
+
+			_ctx = swr_alloc_set_opts(NULL,
+				_sample_dst->channel_layout, _sample_dst->fmt, _sample_dst->sample_rate,
+				_sample_src->channel_layout, _sample_src->fmt, _sample_src->sample_rate,
+				0, NULL);
+
+			if (_ctx == NULL) {
+				err = AE_RESAMPLE_INIT_FAILED;
+				break;
+			}
+
+			int ret = swr_init(_ctx);
+			if (ret < 0) {
+				err = AE_RESAMPLE_INIT_FAILED;
+				break;
+			}
+
+
+
+			// Bytes needed for one destination frame (align = 1).
+			*resapmled_frame_size = av_samples_get_buffer_size(NULL, _sample_dst->nb_channels, _sample_dst->nb_samples, _sample_dst->fmt, 1);
+
+		} while (0);
+
+		if (err != AE_NO) {
+			cleanup();
+			al_fatal("resample pcm init failed:%d", err);
+		}
+
+		return err;
+	}
+
+	// Converts one frame of src into dst. Returns swr_convert's result:
+	// samples per channel on success, a negative ffmpeg error otherwise.
+	int resample_pcm::convert(const uint8_t * src, int src_len, uint8_t * dst, int dst_len)
+	{
+		// Destination is written as two planes split at dst_len / 2
+		// (assumes a planar two-channel output layout — TODO confirm).
+		uint8_t *out_planes[2] = { dst, dst + dst_len / 2 };
+
+		// Source is handed over as a single (interleaved) plane.
+		const uint8_t *in_planes[2] = { src, NULL };
+
+		return swr_convert(_ctx, out_planes, _sample_dst->nb_samples,
+			in_planes, _sample_src->nb_samples);
+	}
+	// Releases all owned state. Every pointer is nulled after release:
+	// init() calls cleanup() on failure and the destructor calls it again,
+	// so the original's stale pointers caused a double free.
+	void resample_pcm::cleanup()
+	{
+		if (_sample_src)
+			free(_sample_src);
+		_sample_src = NULL;
+
+		if (_sample_dst)
+			free(_sample_dst);
+		_sample_dst = NULL;
+
+		if (_ctx)
+			swr_free(&_ctx);	// swr_free also nulls _ctx
+	}
+}

+ 33 - 0
libs/Recorder/resample_pcm.h

@@ -0,0 +1,33 @@
+#ifndef RESAMPLE_PCM
+#define RESAMPLE_PCM
+
+#include <stdint.h>
+
+#include "headers_ffmpeg.h"
+
+namespace am {
+	// Audio format description for one side of a resample conversion.
+	typedef struct {
+		int nb_samples;	// samples per frame, per channel
+		int64_t channel_layout;	// ffmpeg channel layout mask
+		int nb_channels;
+		AVSampleFormat fmt;
+		int sample_rate;
+	}SAMPLE_SETTING;
+
+	// Thin wrapper over libswresample for PCM format conversion.
+	class resample_pcm
+	{
+	public:
+		resample_pcm();
+		~resample_pcm();
+
+		// Prepares the converter; reports the destination frame byte size
+		// through *resapmled_frame_size. Returns an AE_* code.
+		int init(const SAMPLE_SETTING *sample_src, const SAMPLE_SETTING *sample_dst,__out int *resapmled_frame_size);
+		// Converts one frame; returns swr_convert's result (samples per
+		// channel, or a negative ffmpeg error).
+		int convert(const uint8_t *src, int src_len, uint8_t *dst, int dst_len);
+	protected:
+		void cleanup();
+	private:
+		SwrContext *_ctx;
+		SAMPLE_SETTING *_sample_src;	// owned copy of the source setting
+		SAMPLE_SETTING *_sample_dst;	// owned copy of the destination setting
+	};
+}
+#endif

+ 96 - 0
libs/Recorder/ring_buffer.cpp

@@ -0,0 +1,96 @@
+#include "ring_buffer.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+namespace am {
+
+	// NOTE(review): ring_buffer.h already defines every member in-class;
+	// with that header included, these out-of-class template definitions
+	// are redefinitions — confirm this translation unit is really built.
+	template<typename T>
+	ring_buffer<T>::ring_buffer(unsigned int size)
+	{
+		_buf = new uint8_t[size];
+		_size = size;
+		_head = 0;
+		_tail = 0;
+	}
+
+	// Releases the backing store; delete[] on null is a no-op.
+	template<typename T>
+	ring_buffer<T>::~ring_buffer()
+	{
+		delete[] _buf;
+	}
+
+	// Appends one frame, wrapping at the end of the buffer, and records its
+	// length/tag for the reader.
+	// NOTE(review): no capacity check — a frame longer than _size overruns
+	// _buf, and the writer can overwrite data the reader has not consumed;
+	// confirm producers stay within capacity.
+	template<typename T>
+	void ring_buffer<T>::put(const void * data, int len, const T & type)
+	{
+		std::lock_guard<std::mutex> locker(_lock);
+
+		// Fast path: the frame fits before the end of the buffer.
+		if (_head + len <= _size) {
+			memcpy(_buf + _head, data, len);
+
+			_head += len;
+		}
+		// Otherwise split the copy across the wrap point.
+		else if (_head + len > _size) {
+			int remain = len - (_size - _head);
+			if (len - remain > 0)
+				memcpy(_buf + _head, data, len - remain);
+
+			if (remain > 0)
+				memcpy(_buf, (unsigned char*)data + len - remain, remain);
+
+			_head = remain;
+		}
+
+		struct ring_frame<T> frame;
+		frame.len = len;
+		frame.type = type;
+
+		_frames.push(frame);
+	}
+
+	// Pops the oldest frame into data (at most len bytes), reporting its
+	// tag via type. Returns the frame length, or 0 when the ring is empty
+	// or the caller's buffer is too small.
+	template<typename T>
+	int ring_buffer<T>::get(void * data, int len, T & type)
+	{
+		std::lock_guard<std::mutex> locker(_lock);
+
+		if (_frames.empty())
+			return 0;
+
+		struct ring_frame<T> frame = _frames.front();
+		_frames.pop();
+
+		if (frame.len > len) {
+			al_error("ringbuff::get need larger buffer");
+			// Advance past the dropped payload so later reads stay in sync;
+			// the original left _tail behind, corrupting every later frame.
+			if (_tail + frame.len <= _size)
+				_tail += frame.len;
+			else
+				_tail = frame.len - (_size - _tail);
+			return 0;
+		}
+
+		// The original was missing the ';' after this assignment, which
+		// cannot compile.
+		type = frame.type;
+
+		if (_tail + frame.len <= _size) {
+			memcpy(data, _buf + _tail, frame.len);
+
+			_tail += frame.len;
+		}
+		else {
+			// Split read across the wrap point.
+			int remain = frame.len - (_size - _tail);
+
+			if (frame.len - remain > 0)
+				memcpy(data, _buf + _tail, frame.len - remain);
+
+			if (remain > 0)
+				memcpy((unsigned char*)data + frame.len - remain, _buf, remain);
+
+			_tail = remain;
+		}
+
+		return frame.len;
+	}
+}

+ 120 - 0
libs/Recorder/ring_buffer.h

@@ -0,0 +1,120 @@
+#ifndef RING_BUFFER
+#define RING_BUFFER
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <queue>
+#include <mutex>
+
+#include "error_define.h"
+#include "log_helper.h"
+
+namespace am {
+
+	// Per-frame metadata: payload length plus a caller-defined tag.
+	template <typename T>
+	struct ring_frame {
+		T type;
+		int len;
+	};
+
+	// Fixed-capacity byte ring that stores variable-length frames with a
+	// tag of type T. All operations are serialized by an internal mutex.
+	// NOTE(review): the writer can still lap an un-consumed reader and
+	// overwrite pending frames — confirm producers stay within capacity.
+	template <typename T>
+	class ring_buffer
+	{
+	public:
+		ring_buffer(unsigned int size = 1920 * 1080 * 4 * 10)
+		{
+			_size = size;
+			_head = _tail = 0;
+
+			_buf = new uint8_t[size];
+		}
+		~ring_buffer()
+		{
+			if (_buf)
+				delete[] _buf;
+		}
+
+		// Appends one frame, wrapping at the end of the buffer.
+		void put(const void *data, int len, const T &type)
+		{
+			std::lock_guard<std::mutex> locker(_lock);
+
+			// A frame larger than the whole ring would make the wrap-around
+			// memcpy below write past the end of _buf; drop it instead.
+			if (len < 0 || (unsigned int)len > _size) {
+				al_error("ringbuff::put frame len %d exceeds capacity", len);
+				return;
+			}
+
+			if (_head + len <= _size) {
+				memcpy(_buf + _head, data, len);
+
+				_head += len;
+			}
+			else {
+				// Split the copy across the wrap point.
+				int remain = len - (_size - _head);
+				if (len - remain > 0)
+					memcpy(_buf + _head, data, len - remain);
+
+				if (remain > 0)
+					memcpy(_buf, (unsigned char*)data + len - remain, remain);
+
+				_head = remain;
+			}
+
+			struct ring_frame<T> frame;
+			frame.len = len;
+			frame.type = type;
+
+			_frames.push(frame);
+		}
+
+		// Pops the oldest frame into data (at most len bytes), reporting
+		// its tag via type. Returns the frame length, or 0 when the ring
+		// is empty or the caller's buffer is too small.
+		int get(void *data, int len, T &type)
+		{
+			std::lock_guard<std::mutex> locker(_lock);
+
+			if (_frames.empty())
+				return 0;
+
+			struct ring_frame<T> frame = _frames.front();
+			_frames.pop();
+
+			if (frame.len > len) {
+				al_error("ringbuff::get need larger buffer");
+				// Still advance past the dropped payload; leaving _tail
+				// behind (as the original did) desynchronizes every
+				// following frame.
+				advance_tail(frame.len);
+				return 0;
+			}
+
+			type = frame.type;
+
+			if (_tail + frame.len <= _size) {
+				memcpy(data, _buf + _tail, frame.len);
+
+				_tail += frame.len;
+			}
+			else {
+				// Split read across the wrap point.
+				int remain = frame.len - (_size - _tail);
+
+				if (frame.len - remain > 0)
+					memcpy(data, _buf + _tail, frame.len - remain);
+
+				if (remain > 0)
+					memcpy((unsigned char*)data + frame.len - remain, _buf, remain);
+
+				_tail = remain;
+			}
+
+			return frame.len;
+		}
+
+	private:
+		// Moves the read position forward by n bytes, wrapping if needed.
+		void advance_tail(int n)
+		{
+			if (_tail + n <= _size)
+				_tail += n;
+			else
+				_tail = n - (_size - _tail);
+		}
+
+		std::queue<ring_frame<T>> _frames;
+		unsigned int _size, _head, _tail;
+
+		uint8_t *_buf;
+
+		std::mutex _lock;
+	};
+
+}
+#endif

+ 97 - 0
libs/Recorder/sws_helper.cpp

@@ -0,0 +1,97 @@
+#include "sws_helper.h"
+
+#include "error_define.h"
+#include "log_helper.h"
+
+namespace am {
+
+	// Start from a fully-reset state; real setup happens in init().
+	sws_helper::sws_helper()
+	{
+		_inited = false;
+
+		_frame = NULL;
+
+		_buffer = NULL;
+
+		// The original left _buffer_size uninitialized.
+		_buffer_size = 0;
+
+		_ctx = NULL;
+	}
+
+
+	// Release the scaler context, frame wrapper and pixel buffer.
+	sws_helper::~sws_helper() { cleanup(); }
+
+	// Creates the libswscale context and the destination pixel buffer.
+	// Returns AE_NO on success (and if already initialized), or an AE_*
+	// code on failure, in which case no partial state is kept.
+	int sws_helper::init(AVPixelFormat src_fmt, int src_width, int src_height, AVPixelFormat dst_fmt, int dst_width, int dst_height)
+	{
+		if (_inited)
+			return AE_NO;
+
+		_ctx = sws_getContext(
+			src_width,
+			src_height,
+			src_fmt,
+			dst_width,
+			dst_height,
+			dst_fmt,
+			SWS_BICUBIC,
+			NULL, NULL, NULL
+		);
+
+		if (!_ctx) {
+			return AE_FFMPEG_NEW_SWSCALE_FAILED;
+		}
+
+		// Negative means invalid format/dimensions; the original passed
+		// that straight to new[].
+		_buffer_size = av_image_get_buffer_size(dst_fmt, dst_width, dst_height, 1);
+		if (_buffer_size <= 0) {
+			cleanup();
+			return AE_FFMPEG_NEW_SWSCALE_FAILED;
+		}
+
+		_buffer = new uint8_t[_buffer_size];
+
+		_frame = av_frame_alloc();
+		if (!_frame) {
+			cleanup();
+			return AE_FFMPEG_NEW_SWSCALE_FAILED;
+		}
+
+		// Point _frame's data/linesize into _buffer for sws_scale output.
+		av_image_fill_arrays(_frame->data, _frame->linesize, _buffer, dst_fmt, dst_width, dst_height, 1);
+
+		_inited = true;
+
+		return AE_NO;
+	}
+
+	// Converts one frame into the internal buffer; *out_data / *len point
+	// at that buffer and stay valid until the next convert() or cleanup().
+	int sws_helper::convert(const AVFrame *frame, uint8_t ** out_data, int * len)
+	{
+		int error = AE_NO;
+		if (!_inited || !_ctx || !_buffer)
+			return AE_NEED_INIT;
+
+		int ret = sws_scale(
+			_ctx,
+			(const uint8_t *const *)frame->data,
+			frame->linesize,
+			0, frame->height,
+			_frame->data, _frame->linesize
+		);
+
+		// NOTE(review): ret (output slice height, negative on error) is
+		// ignored, so scaling failures are reported as success — confirm.
+		*out_data = _buffer;
+		*len = _buffer_size;
+
+		return error;
+	}
+
+	// Releases everything and returns to the pre-init() state; safe to
+	// call repeatedly.
+	void sws_helper::cleanup()
+	{
+		_inited = false;
+
+		if (_ctx) {
+			sws_freeContext(_ctx);
+			_ctx = NULL;
+		}
+
+		if (_frame)
+			av_frame_free(&_frame);	// av_frame_free also nulls its argument
+		_frame = NULL;
+
+		if (_buffer) {
+			delete[] _buffer;
+			_buffer = NULL;
+		}
+	}
+
+}

+ 36 - 0
libs/Recorder/sws_helper.h

@@ -0,0 +1,36 @@
+#pragma once
+
+#include <atomic>
+
+#include "headers_ffmpeg.h"
+
+namespace am {
+
+	// Thin RAII wrapper around libswscale: converts frames from one pixel
+	// format/size to another into an internally owned buffer.
+	class sws_helper
+	{
+	public:
+		sws_helper();
+		~sws_helper();
+		
+		// Creates the scaler context and output buffer; returns an AE_* code.
+		int init(
+			AVPixelFormat src_fmt,int src_width,int src_height,
+			AVPixelFormat dst_fmt,int dst_width,int dst_height
+		);
+
+		// Converts one frame; *out_data/*len reference the internal buffer,
+		// valid until the next convert() or destruction.
+		int convert(const AVFrame *frame, uint8_t ** out_data, int *len);
+
+	private:
+		void cleanup();
+
+	private:
+		std::atomic_bool _inited;	// true once init() succeeded
+
+		AVFrame *_frame;	// wraps _buffer for sws_scale output
+
+		uint8_t *_buffer;	// owned destination pixels
+		int _buffer_size;
+
+		struct SwsContext *_ctx;
+	};
+
+}

+ 38 - 0
libs/Recorder/system_error.cpp

@@ -0,0 +1,38 @@
+#include "system_error.h"
+
+#include <Windows.h>
+
+namespace am {
+
+const std::string& system_error::error2str(unsigned long error)
+{
+	DWORD system_locale = MAKELANGID(LANG_NEUTRAL, SUBLANG_NEUTRAL);
+
+	HLOCAL local_buf = nullptr;
+
+	BOOL ret = FormatMessage(
+		FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_ALLOCATE_BUFFER,
+		NULL, error, system_locale,(PSTR) &local_buf, 0, NULL);
+
+	if (!ret) {
+		HMODULE hnetmsg = LoadLibraryEx("netmsg.dll", NULL, DONT_RESOLVE_DLL_REFERENCES);
+		if (hnetmsg != nullptr) {
+			ret = FormatMessage(
+				FORMAT_MESSAGE_FROM_HMODULE | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_ALLOCATE_BUFFER,
+				hnetmsg, error, system_locale, (PSTR)&local_buf, 0, NULL);
+
+			FreeLibrary(hnetmsg);
+		}
+	}
+
+	std::string error_str;
+
+	if (ret) {
+		error_str = (LPCTSTR)LocalLock(local_buf);
+		LocalFree(local_buf);
+	}
+
+	return error_str;
+}
+
+}

+ 12 - 0
libs/Recorder/system_error.h

@@ -0,0 +1,12 @@
+#pragma once
+
+#include <string>
+
+namespace am {
+
+// Helpers for turning Win32 error codes into readable text.
+class system_error {
+public:
+	// Returns the system message for `error`. The reference points to
+	// storage owned by the implementation — copy it if you need to keep it.
+	static const std::string& error2str(unsigned long error);
+};
+
+}
+ 48 - 0
libs/Recorder/system_lib.cpp

@@ -0,0 +1,48 @@
+#include "system_lib.h"
+
+#include "log_helper.h"
+
+namespace am {
+
+	// Cached path of the Windows system directory (e.g. C:\Windows\System32).
+	static char system_path[260] = { 0 };
+
+	// Fills the cache on first use; returns false if the query fails.
+	static bool get_system_path() {
+
+		if (system_path[0] == '\0') {
+			if (!GetSystemDirectoryA(system_path, MAX_PATH)) {
+				al_fatal("failed to get system directory :%lu", GetLastError());
+				return false;
+			}
+		}
+
+		return true;
+	}
+
+	// Loads `name` from the Windows system directory by full path.
+	// Returns NULL on failure.
+	HMODULE load_system_library(const char * name)
+	{
+		if (get_system_path() == false) return NULL;
+
+		char base_path[MAX_PATH] = { 0 };
+
+		// Bound the concatenation explicitly: the original strcpy/strcat
+		// pair could overflow base_path for long names.
+		// Layout: "<system dir>" + '\\' + "<name>" + NUL.
+		if (strlen(system_path) + 1 + strlen(name) + 1 > sizeof(base_path)) {
+			al_error("system library path too long :%s", name);
+			return NULL;
+		}
+
+		strcpy(base_path, system_path);
+		strcat(base_path, "\\");
+		strcat(base_path, name);
+
+		// Reuse the module if it is already mapped into the process.
+		// NOTE(review): GetModuleHandleA does not add a reference, but
+		// free_system_library() still frees the handle — confirm callers
+		// only pair it with handles from the LoadLibraryA path.
+		HMODULE module = GetModuleHandleA(base_path);
+		if (module)
+			return module;
+
+		module = LoadLibraryA(base_path);
+		if (!module) {
+			al_error("failed load system library :%lu", GetLastError());
+		}
+
+		return module;
+	}
+
+	// Releases a handle obtained from load_system_library().
+	void free_system_library(HMODULE handle)
+	{
+		// FreeModule is the legacy alias for FreeLibrary.
+		FreeLibrary(handle);
+	}
+
+}

+ 10 - 0
libs/Recorder/system_lib.h

@@ -0,0 +1,10 @@
+#pragma once
+
+#include <Windows.h>
+
+namespace am{
+	// Loads `name` from the Windows system directory (full-path load);
+	// returns NULL on failure.
+	HMODULE load_system_library(const char *name);
+	
+	// Releases a handle obtained from load_system_library().
+	void free_system_library(HMODULE handle);
+
+}

+ 32 - 0
libs/Recorder/system_time.cpp

@@ -0,0 +1,32 @@
+#include "system_time.h"
+
+#include <Windows.h>
+
+namespace am {
+
+	// QueryPerformanceFrequency is fixed after boot, so cache the result.
+	static bool got_clockfreq = false;
+	static LARGE_INTEGER  clock_freq;
+
+	// NOTE(review): the first-use check here is racy; it is benign only if
+	// two threads querying the frequency concurrently is acceptable —
+	// confirm.
+	static uint64_t get_clockfreq() {
+		if (!got_clockfreq) {
+			QueryPerformanceFrequency(&clock_freq);
+			got_clockfreq = true;
+		}
+
+		return clock_freq.QuadPart;
+	}
+
+	// Monotonic timestamp in nanoseconds, from QueryPerformanceCounter.
+	uint64_t system_time::get_time_ns()
+	{
+		LARGE_INTEGER current_time;
+
+		QueryPerformanceCounter(&current_time);
+
+		const uint64_t ticks = (uint64_t)current_time.QuadPart;
+		const uint64_t freq = get_clockfreq();
+
+		// Split whole seconds from the remainder: the original converted
+		// through double, which silently loses precision once the tick
+		// value exceeds 2^53, while a single 64-bit multiply could
+		// overflow. The remainder term stays well below 2^63 for typical
+		// QPC frequencies (~10 MHz).
+		return (ticks / freq) * 1000000000ULL +
+			(ticks % freq) * 1000000000ULL / freq;
+	}
+
+}

+ 19 - 0
libs/Recorder/system_time.h

@@ -0,0 +1,19 @@
+#ifndef SYSTEM_TIME
+#define SYSTEM_TIME
+
+#include <stdint.h>
+
+namespace am {
+
+	// Static-only helper for high-resolution timestamps.
+	class system_time
+	{
+	private:
+		system_time() {};
+		~system_time() {};
+	public:
+		// Monotonic time in nanoseconds (QueryPerformanceCounter based).
+		static uint64_t get_time_ns();
+	};
+
+}
+
+#endif // !SYSTEM_TIME

+ 165 - 0
libs/Recorder/system_version.cpp

@@ -0,0 +1,165 @@
+#include "system_version.h"
+
+#include <Windows.h>
+
+#include "utils_string.h"
+#include "log_helper.h"
+
+#define WINVER_REG_KEY L"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion"
+
+typedef DWORD(WINAPI *get_file_version_info_size_w_t)(LPCWSTR module,
+	LPDWORD unused);
+typedef BOOL(WINAPI *get_file_version_info_w_t)(LPCWSTR module, DWORD unused,
+	DWORD len, LPVOID data);
+typedef BOOL(WINAPI *ver_query_value_w_t)(LPVOID data, LPCWSTR subblock,
+	LPVOID *buf, PUINT sizeout);
+
+static get_file_version_info_size_w_t get_file_version_info_size = NULL;
+static get_file_version_info_w_t get_file_version_info = NULL;
+static ver_query_value_w_t ver_query_value = NULL;
+static bool ver_initialized = false;
+static bool ver_initialize_success = false;
+
+// Resolves GetFileVersionInfoSizeW / GetFileVersionInfoW / VerQueryValueW
+// from version.dll at runtime. Sets ver_initialized on the first attempt
+// (success or not, so failures are not retried) and ver_initialize_success
+// only when every symbol resolved. The module is intentionally left
+// loaded for the lifetime of the process.
+static bool initialize_version_functions(void)
+{
+	HMODULE ver = GetModuleHandleW(L"version");
+
+	ver_initialized = true;
+
+	if (!ver) {
+		ver = LoadLibraryW(L"version");
+		if (!ver) {
+			al_error("failed to load windows version library");
+			return false;
+		}
+	}
+
+	get_file_version_info_size =
+		(get_file_version_info_size_w_t)GetProcAddress(
+			ver, "GetFileVersionInfoSizeW");
+	get_file_version_info = (get_file_version_info_w_t)GetProcAddress(
+		ver, "GetFileVersionInfoW");
+	ver_query_value =
+		(ver_query_value_w_t)GetProcAddress(ver, "VerQueryValueW");
+
+	if (!get_file_version_info_size || !get_file_version_info ||
+		!ver_query_value) {
+		al_error("failed to load windows version functions");
+		return false;
+	}
+
+	ver_initialize_success = true;
+	return true;
+}
+
+namespace am {
+
+	// Reads the four-part file version of `tar` (a module name or path)
+	// via the dynamically loaded version.dll API. Returns false on any
+	// failure; *info is only written on success.
+	bool system_version::get_dll(const std::string tar, winversion_info * info)
+	{
+		VS_FIXEDFILEINFO *file_info = NULL;
+		UINT len = 0;
+		BOOL success;
+		LPVOID data;
+		DWORD size;
+		std::wstring wtar = utils_string::ascii_unicode(tar);
+
+		if (!ver_initialized && !initialize_version_functions())
+			return false;
+		if (!ver_initialize_success)
+			return false;
+
+		size = get_file_version_info_size(wtar.c_str(), NULL);
+		if (!size) {
+			al_error("failed to get %s version info size",tar.c_str());
+			return false;
+		}
+
+		data = malloc(size);
+		// The original passed an unchecked malloc result straight to
+		// GetFileVersionInfoW.
+		if (!data) {
+			al_error("failed to alloc %lu bytes for %s version info",
+				size, tar.c_str());
+			return false;
+		}
+
+		if (!get_file_version_info(wtar.c_str(), 0, size, data)) {
+			al_error("failed to get %s version info", tar.c_str());
+			free(data);
+			return false;
+		}
+
+		// "\\" queries the root VS_FIXEDFILEINFO block.
+		success = ver_query_value(data, L"\\", (LPVOID *)&file_info, &len);
+		if (!success || !file_info || !len) {
+			al_error("failed to get %s version info value",tar.c_str());
+			free(data);
+			return false;
+		}
+
+		info->major = (int)HIWORD(file_info->dwFileVersionMS);
+		info->minor = (int)LOWORD(file_info->dwFileVersionMS);
+		info->build = (int)HIWORD(file_info->dwFileVersionLS);
+		info->revis = (int)LOWORD(file_info->dwFileVersionLS);
+
+		free(data);
+		return true;
+	}
+
+	// Returns the cached Windows version, resolved once from kernel32's
+	// file version; on Windows 10 the UBR registry value refines the
+	// revision field. NOTE(review): the first-call caching is not
+	// thread-safe — confirm initialization happens from a single thread.
+	void system_version::get_win(winversion_info * info)
+	{
+		static winversion_info ver = { 0 };
+		static bool got_version = false;
+
+		if (!info)
+			return;
+
+		if (!got_version) {
+			get_dll("kernel32", &ver);
+			got_version = true;
+
+			if (ver.major == 10) {
+				HKEY key;
+				DWORD size, win10_revision;
+				LSTATUS status;
+
+				status = RegOpenKeyW(HKEY_LOCAL_MACHINE, WINVER_REG_KEY,
+					&key);
+				if (status != ERROR_SUCCESS)
+					return;
+
+				size = sizeof(win10_revision);
+
+				// UBR (Update Build Revision) is the patch-level number.
+				status = RegQueryValueExW(key, L"UBR", NULL, NULL,
+					(LPBYTE)&win10_revision,
+					&size);
+				if (status == ERROR_SUCCESS)
+					ver.revis = (int)win10_revision > ver.revis
+					? (int)win10_revision
+					: ver.revis;
+
+				RegCloseKey(key);
+			}
+		}
+
+		*info = ver;
+	}
+
+	// Windows 8 is NT 6.2; any higher major version, or 6.x with
+	// minor >= 2, qualifies.
+	bool system_version::is_win8_or_above()
+	{
+		winversion_info info;
+
+		get_win(&info);
+
+		if (info.major > 6)
+			return true;
+
+		return info.major == 6 && info.minor >= 2;
+	}
+
+	// True on Windows 10 or newer; when build_number >= 0, the installed
+	// build must also reach it. The original reused the Windows 8 check
+	// (major > 6 || 6.2+) and ignored build_number entirely.
+	bool system_version::is_win10_or_above(int build_number) {
+		winversion_info info;
+
+		get_win(&info);
+
+		if (info.major < 10)
+			return false;
+
+		return build_number < 0 || info.build >= build_number;
+	}
+
+	// True when the operating system itself is 32-bit.
+	bool system_version::is_32()
+	{
+#if defined(_WIN64)
+		// A 64-bit process can only run on 64-bit Windows.
+		return false;
+#elif defined(_WIN32)
+		// A 32-bit build runs on 64-bit Windows iff it is under WOW64.
+		BOOL b64 = false;
+		return !(IsWow64Process(GetCurrentProcess(), &b64) && b64);
+#endif
+	}
+
+}

+ 37 - 0
libs/Recorder/system_version.h

@@ -0,0 +1,37 @@
+#ifndef UTILS_WINVERSION
+#define UTILS_WINVERSION
+
+#include <stdint.h>
+#include <string>
+
+namespace am {
+	// Four-part Windows version: major.minor.build.revision.
+	typedef struct _winversion_info {
+		int major;
+		int minor;
+		int build;
+		int revis;
+	}winversion_info;
+
+	// Static-only helpers for querying Windows / module versions.
+	class system_version
+	{
+	private:
+		system_version();
+
+	public:
+		// Reads the file version of a DLL/EXE into *info.
+		static bool get_dll(const std::string tar, winversion_info *info);
+
+		// Cached OS version (kernel32's file version; on Windows 10 the
+		// UBR registry value refines the revision).
+		static void get_win(winversion_info *info);
+
+		static bool is_win8_or_above();
+
+		// Windows 10 check; build_number is meant as a minimum build —
+		// confirm the implementation honors it.
+		static bool is_win10_or_above(int build_number = -1);
+
+		// True when the OS itself is 32-bit.
+		static bool is_32();
+
+	private:
+		// NOTE(review): appears unused — confirm before removing.
+		std::string _target_file;
+	};
+
+}
+
+#endif

+ 1009 - 0
libs/Recorder/transcode_aac.cpp

@@ -0,0 +1,1009 @@
+/*
+* Copyright (c) 2013-2018 Andreas Unterweger
+*
+* This file is part of FFmpeg.
+*
+* FFmpeg is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* FFmpeg is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with FFmpeg; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+/**
+* @file
+* Simple audio converter
+*
+* @example transcode_aac.c
+* Convert an input audio file to AAC in an MP4 container using FFmpeg.
+* Formats other than MP4 are supported based on the output file extension.
+* @author Andreas Unterweger (dustsigns@gmail.com)
+*/
+
+#include <stdio.h>
+
+#include "libavformat/avformat.h"
+#include "libavformat/avio.h"
+
+#include "libavcodec/avcodec.h"
+
+#include "libavutil/audio_fifo.h"
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/frame.h"
+#include "libavutil/opt.h"
+
+#include "libswresample/swresample.h"
+
+#include "common.h"
+
+/* The output bit rate in bit/s */
+#define OUTPUT_BIT_RATE 96000
+/* The number of output channels */
+#define OUTPUT_CHANNELS 2
+
+/**
+* Open an input file and the required decoder.
+* @param      filename             File to be opened
+* @param[out] input_format_context Format context of opened file
+* @param[out] input_codec_context  Codec context of opened file
+* @return Error code (0 if successful)
+*/
+static int open_input_file(const char *filename,
+	AVFormatContext **input_format_context,
+	AVCodecContext **input_codec_context)
+{
+	AVCodecContext *avctx;
+	AVCodec *input_codec;
+	int error;
+
+	/* Open the input file to read from it. */
+	if ((error = avformat_open_input(input_format_context, filename, NULL,
+		NULL)) < 0) {
+		fprintf(stderr, "Could not open input file '%s' (error '%s')\n",
+			filename, av_err2str(error));
+		*input_format_context = NULL;
+		return error;
+	}
+
+	/* Get information on the input file (number of streams etc.). */
+	if ((error = avformat_find_stream_info(*input_format_context, NULL)) < 0) {
+		/* BUGFIX: message read "Could not open find stream info". */
+		fprintf(stderr, "Could not find stream info (error '%s')\n",
+			av_err2str(error));
+		avformat_close_input(input_format_context);
+		return error;
+	}
+
+	/* Make sure that there is only one stream in the input file. */
+	if ((*input_format_context)->nb_streams != 1) {
+		/* BUGFIX: nb_streams is unsigned; use %u instead of %d. */
+		fprintf(stderr, "Expected one audio input stream, but found %u\n",
+			(*input_format_context)->nb_streams);
+		avformat_close_input(input_format_context);
+		return AVERROR_EXIT;
+	}
+
+	/* Find a decoder for the audio stream. */
+	if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codecpar->codec_id))) {
+		fprintf(stderr, "Could not find input codec\n");
+		avformat_close_input(input_format_context);
+		return AVERROR_EXIT;
+	}
+
+	/* Allocate a new decoding context. */
+	avctx = avcodec_alloc_context3(input_codec);
+	if (!avctx) {
+		fprintf(stderr, "Could not allocate a decoding context\n");
+		avformat_close_input(input_format_context);
+		return AVERROR(ENOMEM);
+	}
+
+	/* Initialize the stream parameters with demuxer information. */
+	error = avcodec_parameters_to_context(avctx, (*input_format_context)->streams[0]->codecpar);
+	if (error < 0) {
+		avformat_close_input(input_format_context);
+		avcodec_free_context(&avctx);
+		return error;
+	}
+
+	/* Open the decoder for the audio stream to use it later. */
+	if ((error = avcodec_open2(avctx, input_codec, NULL)) < 0) {
+		fprintf(stderr, "Could not open input codec (error '%s')\n",
+			av_err2str(error));
+		avcodec_free_context(&avctx);
+		avformat_close_input(input_format_context);
+		return error;
+	}
+
+	/* Save the decoder context for easier access later. */
+	*input_codec_context = avctx;
+
+	return 0;
+}
+
+/**
+* Open an output file and the required encoder.
+* Also set some basic encoder parameters.
+* Some of these parameters are based on the input file's parameters.
+* @param      filename              File to be opened
+* @param      input_codec_context   Codec context of input file
+* @param[out] output_format_context Format context of output file
+* @param[out] output_codec_context  Codec context of output file
+* @return Error code (0 if successful)
+*/
+static int open_output_file(const char *filename,
+	AVCodecContext *input_codec_context,
+	AVFormatContext **output_format_context,
+	AVCodecContext **output_codec_context)
+{
+	AVCodecContext *avctx = NULL;
+	AVIOContext *output_io_context = NULL;
+	AVStream *stream = NULL;
+	AVCodec *output_codec = NULL;
+	int error;
+
+	/* Open the output file to write to it. */
+	if ((error = avio_open(&output_io_context, filename,
+		AVIO_FLAG_WRITE)) < 0) {
+		fprintf(stderr, "Could not open output file '%s' (error '%s')\n",
+			filename, av_err2str(error));
+		return error;
+	}
+
+	/* Create a new format context for the output container format. */
+	if (!(*output_format_context = avformat_alloc_context())) {
+		fprintf(stderr, "Could not allocate output format context\n");
+		/* BUGFIX: the already-opened AVIO context leaked here. */
+		avio_closep(&output_io_context);
+		return AVERROR(ENOMEM);
+	}
+
+	/* Associate the output file (pointer) with the container format context. */
+	(*output_format_context)->pb = output_io_context;
+
+	/* Guess the desired container format based on the file extension. */
+	if (!((*output_format_context)->oformat = av_guess_format(NULL, filename,
+		NULL))) {
+		fprintf(stderr, "Could not find output file format\n");
+		/* BUGFIX: 'error' was read uninitialized in cleanup. */
+		error = AVERROR_EXIT;
+		goto cleanup;
+	}
+
+	if (!((*output_format_context)->url = av_strdup(filename))) {
+		fprintf(stderr, "Could not allocate url.\n");
+		error = AVERROR(ENOMEM);
+		goto cleanup;
+	}
+
+	/* Find the encoder to be used by its name. */
+	if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) {
+		fprintf(stderr, "Could not find an AAC encoder.\n");
+		/* BUGFIX: 'error' was read uninitialized in cleanup. */
+		error = AVERROR_EXIT;
+		goto cleanup;
+	}
+
+	/* Create a new audio stream in the output file container. */
+	if (!(stream = avformat_new_stream(*output_format_context, NULL))) {
+		fprintf(stderr, "Could not create new stream\n");
+		error = AVERROR(ENOMEM);
+		goto cleanup;
+	}
+
+	avctx = avcodec_alloc_context3(output_codec);
+	if (!avctx) {
+		fprintf(stderr, "Could not allocate an encoding context\n");
+		error = AVERROR(ENOMEM);
+		goto cleanup;
+	}
+
+	/* Set the basic encoder parameters.
+	* The input file's sample rate is used to avoid a sample rate conversion. */
+	avctx->channels = OUTPUT_CHANNELS;
+	avctx->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
+	avctx->sample_rate = input_codec_context->sample_rate;
+	avctx->sample_fmt = output_codec->sample_fmts[0];
+	avctx->bit_rate = OUTPUT_BIT_RATE;
+
+	/* Allow the use of the experimental AAC encoder. */
+	avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+
+	/* Set the sample rate for the container. */
+	stream->time_base.den = input_codec_context->sample_rate;
+	stream->time_base.num = 1;
+
+	/* Some container formats (like MP4) require global headers to be present.
+	* Mark the encoder so that it behaves accordingly. */
+	if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
+		avctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
+	/* Open the encoder for the audio stream to use it later. */
+	if ((error = avcodec_open2(avctx, output_codec, NULL)) < 0) {
+		fprintf(stderr, "Could not open output codec (error '%s')\n",
+			av_err2str(error));
+		goto cleanup;
+	}
+
+	error = avcodec_parameters_from_context(stream->codecpar, avctx);
+	if (error < 0) {
+		fprintf(stderr, "Could not initialize stream parameters\n");
+		goto cleanup;
+	}
+
+	/* Save the encoder context for easier access later. */
+	*output_codec_context = avctx;
+
+	return 0;
+
+cleanup:
+	avcodec_free_context(&avctx);
+	avio_closep(&(*output_format_context)->pb);
+	avformat_free_context(*output_format_context);
+	*output_format_context = NULL;
+	return error < 0 ? error : AVERROR_EXIT;
+}
+
+/**
+* Initialize one data packet for reading or writing.
+* @param packet Packet to be initialized
+*/
+static void init_packet(AVPacket *packet)
+{
+	/* av_init_packet() only resets the metadata fields; data and size
+	* are left untouched and must be cleared by hand. */
+	av_init_packet(packet);
+	/* Set the packet data and size so that it is recognized as being empty. */
+	packet->data = NULL;
+	packet->size = 0;
+}
+
+/**
+* Initialize one audio frame for reading from the input file.
+* @param[out] frame Frame to be initialized
+* @return Error code (0 if successful)
+*/
+static int init_input_frame(AVFrame **frame)
+{
+	/* Allocate an empty frame for the decoder to fill; the caller
+	* owns it and must release it with av_frame_free(). */
+	*frame = av_frame_alloc();
+	if (*frame)
+		return 0;
+
+	fprintf(stderr, "Could not allocate input frame\n");
+	return AVERROR(ENOMEM);
+}
+
+/**
+* Initialize the audio resampler based on the input and output codec settings.
+* If the input and output sample formats differ, a conversion is required
+* libswresample takes care of this, but requires initialization.
+* @param      input_codec_context  Codec context of the input file
+* @param      output_codec_context Codec context of the output file
+* @param[out] resample_context     Resample context for the required conversion
+* @return Error code (0 if successful)
+*/
+static int init_resampler(AVCodecContext *input_codec_context,
+	AVCodecContext *output_codec_context,
+	SwrContext **resample_context)
+{
+	int error;
+
+	/*
+	* Create a resampler context for the conversion.
+	* Set the conversion parameters.
+	* Default channel layouts based on the number of channels
+	* are assumed for simplicity (they are sometimes not detected
+	* properly by the demuxer and/or decoder).
+	*/
+	*resample_context = swr_alloc_set_opts(NULL,
+		av_get_default_channel_layout(output_codec_context->channels),
+		output_codec_context->sample_fmt,
+		output_codec_context->sample_rate,
+		av_get_default_channel_layout(input_codec_context->channels),
+		input_codec_context->sample_fmt,
+		input_codec_context->sample_rate,
+		0, NULL);
+	if (!*resample_context) {
+		fprintf(stderr, "Could not allocate resample context\n");
+		return AVERROR(ENOMEM);
+	}
+	/*
+	* Perform a sanity check so that the number of converted samples is
+	* not greater than the number of samples to be converted.
+	* If the sample rates differ, this case has to be handled differently
+	* (convert_samples() below assumes a 1:1 in/out sample count).
+	*/
+	av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate);
+
+	/* Open the resampler with the specified parameters. */
+	if ((error = swr_init(*resample_context)) < 0) {
+		fprintf(stderr, "Could not open resample context\n");
+		/* swr_free also resets *resample_context to NULL for the caller. */
+		swr_free(resample_context);
+		return error;
+	}
+	return 0;
+}
+
+/**
+* Initialize a FIFO buffer for the audio samples to be encoded.
+* @param[out] fifo                 Sample buffer
+* @param      output_codec_context Codec context of the output file
+* @return Error code (0 if successful)
+*/
+static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
+{
+	/* The FIFO grows on demand, so an initial capacity of one sample
+	* is enough; its format and channel count must match the encoder. */
+	*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
+		output_codec_context->channels, 1);
+	if (*fifo)
+		return 0;
+
+	fprintf(stderr, "Could not allocate FIFO\n");
+	return AVERROR(ENOMEM);
+}
+
+/**
+* Write the header of the output file container.
+* @param output_format_context Format context of the output file
+* @return Error code (0 if successful)
+*/
+static int write_output_file_header(AVFormatContext *output_format_context)
+{
+	/* Emit the container header; must happen before any packet is
+	* written. */
+	const int error = avformat_write_header(output_format_context, NULL);
+	if (error >= 0)
+		return 0;
+
+	fprintf(stderr, "Could not write output file header (error '%s')\n",
+		av_err2str(error));
+	return error;
+}
+
+/**
+* Decode one audio frame from the input file.
+* @param      frame                Audio frame to be decoded
+* @param      input_format_context Format context of the input file
+* @param      input_codec_context  Codec context of the input file
+* @param[out] data_present         Indicates whether data has been decoded
+* @param[out] finished             Indicates whether the end of file has
+*                                  been reached and all data has been
+*                                  decoded. If this flag is false, there
+*                                  is more data to be decoded, i.e., this
+*                                  function has to be called again.
+* @return Error code (0 if successful)
+*/
+static int decode_audio_frame(AVFrame *frame,
+	AVFormatContext *input_format_context,
+	AVCodecContext *input_codec_context,
+	int *data_present, int *finished)
+{
+	/* Packet used for temporary storage. */
+	AVPacket input_packet;
+	int error;
+	init_packet(&input_packet);
+
+	/* Read one audio frame from the input file into a temporary packet. */
+	if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
+		/* If we are at the end of the file, flush the decoder below. */
+		if (error == AVERROR_EOF)
+			*finished = 1;
+		else {
+			fprintf(stderr, "Could not read frame (error '%s')\n",
+				av_err2str(error));
+			/* BUGFIX: return through cleanup so the packet is unreffed. */
+			goto cleanup;
+		}
+	}
+
+	/* Send the audio frame stored in the temporary packet to the decoder.
+	* The input audio stream decoder is used to do this. */
+	if ((error = avcodec_send_packet(input_codec_context, &input_packet)) < 0) {
+		fprintf(stderr, "Could not send packet for decoding (error '%s')\n",
+			av_err2str(error));
+		/* BUGFIX: the early return here leaked the packet's buffer. */
+		goto cleanup;
+	}
+
+	/* Receive one frame from the decoder. */
+	error = avcodec_receive_frame(input_codec_context, frame);
+	/* If the decoder asks for more data to be able to decode a frame,
+	* return indicating that no data is present. */
+	if (error == AVERROR(EAGAIN)) {
+		error = 0;
+		goto cleanup;
+		/* If the end of the input file is reached, stop decoding. */
+	}
+	else if (error == AVERROR_EOF) {
+		*finished = 1;
+		error = 0;
+		goto cleanup;
+	}
+	else if (error < 0) {
+		fprintf(stderr, "Could not decode frame (error '%s')\n",
+			av_err2str(error));
+		goto cleanup;
+		/* Default case: Return decoded data. */
+	}
+	else {
+		*data_present = 1;
+		goto cleanup;
+	}
+
+cleanup:
+	av_packet_unref(&input_packet);
+	return error;
+}
+
+/**
+* Initialize a temporary storage for the specified number of audio samples.
+* The conversion requires temporary storage due to the different format.
+* The number of audio samples to be allocated is specified in frame_size.
+* @param[out] converted_input_samples Array of converted samples. The
+*                                     dimensions are reference, channel
+*                                     (for multi-channel audio), sample.
+* @param      output_codec_context    Codec context of the output file
+* @param      frame_size              Number of samples to be converted in
+*                                     each round
+* @return Error code (0 if successful)
+*/
+static int init_converted_samples(uint8_t ***converted_input_samples,
+	AVCodecContext *output_codec_context,
+	int frame_size)
+{
+	int error;
+
+	/* Allocate as many pointers as there are audio channels.
+	* Each pointer will later point to the audio samples of the corresponding
+	* channels (although it may be NULL for interleaved formats).
+	* calloc zero-fills the array, so the error path below can safely
+	* av_freep() element [0] even when av_samples_alloc never ran.
+	*/
+	if (!(*converted_input_samples = (uint8_t**)calloc(output_codec_context->channels,
+		sizeof(**converted_input_samples)))) {
+		fprintf(stderr, "Could not allocate converted input sample pointers\n");
+		return AVERROR(ENOMEM);
+	}
+
+	/* Allocate memory for the samples of all channels in one consecutive
+	* block for convenience. */
+	if ((error = av_samples_alloc(*converted_input_samples, NULL,
+		output_codec_context->channels,
+		frame_size,
+		output_codec_context->sample_fmt, 0)) < 0) {
+		fprintf(stderr,
+			"Could not allocate converted input samples (error '%s')\n",
+			av_err2str(error));
+		av_freep(&(*converted_input_samples)[0]);
+		free(*converted_input_samples);
+		return error;
+	}
+	return 0;
+}
+
+/**
+* Convert the input audio samples into the output sample format.
+* The conversion happens on a per-frame basis, the size of which is
+* specified by frame_size.
+* @param      input_data       Samples to be decoded. The dimensions are
+*                              channel (for multi-channel audio), sample.
+* @param[out] converted_data   Converted samples. The dimensions are channel
+*                              (for multi-channel audio), sample.
+* @param      frame_size       Number of samples to be converted
+* @param      resample_context Resample context for the conversion
+* @return Error code (0 if successful)
+*/
+static int convert_samples(const uint8_t **input_data,
+	uint8_t **converted_data, const int frame_size,
+	SwrContext *resample_context)
+{
+	/* Feed frame_size input samples through the resampler; with equal
+	* in/out sample rates the same number of samples comes back. */
+	const int error = swr_convert(resample_context,
+		converted_data, frame_size,
+		input_data, frame_size);
+	if (error >= 0)
+		return 0;
+
+	fprintf(stderr, "Could not convert input samples (error '%s')\n",
+		av_err2str(error));
+	return error;
+}
+
+/**
+* Add converted input audio samples to the FIFO buffer for later processing.
+* @param fifo                    Buffer to add the samples to
+* @param converted_input_samples Samples to be added. The dimensions are channel
+*                                (for multi-channel audio), sample.
+* @param frame_size              Number of samples to be converted
+* @return Error code (0 if successful)
+*/
+static int add_samples_to_fifo(AVAudioFifo *fifo,
+	uint8_t **converted_input_samples,
+	const int frame_size)
+{
+	/* Grow the FIFO so it can hold the old samples plus the new ones. */
+	const int error = av_audio_fifo_realloc(fifo,
+		av_audio_fifo_size(fifo) + frame_size);
+	if (error < 0) {
+		fprintf(stderr, "Could not reallocate FIFO\n");
+		return error;
+	}
+
+	/* Append the converted samples; a short write counts as failure. */
+	if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
+		frame_size) < frame_size) {
+		fprintf(stderr, "Could not write data to FIFO\n");
+		return AVERROR_EXIT;
+	}
+
+	return 0;
+}
+
+/**
+* Read one audio frame from the input file, decode, convert and store
+* it in the FIFO buffer.
+* @param      fifo                 Buffer used for temporary storage
+* @param      input_format_context Format context of the input file
+* @param      input_codec_context  Codec context of the input file
+* @param      output_codec_context Codec context of the output file
+* @param      resampler_context    Resample context for the conversion
+* @param[out] finished             Indicates whether the end of file has
+*                                  been reached and all data has been
+*                                  decoded. If this flag is false,
+*                                  there is more data to be decoded,
+*                                  i.e., this function has to be called
+*                                  again.
+* @return Error code (0 if successful)
+*/
+static int read_decode_convert_and_store(AVAudioFifo *fifo,
+	AVFormatContext *input_format_context,
+	AVCodecContext *input_codec_context,
+	AVCodecContext *output_codec_context,
+	SwrContext *resampler_context,
+	int *finished)
+{
+	/* Temporary storage of the input samples of the frame read from the file. */
+	AVFrame *input_frame = NULL;
+	/* Temporary storage for the converted input samples. */
+	uint8_t **converted_input_samples = NULL;
+	int data_present = 0;
+	int ret = AVERROR_EXIT;
+
+	/* Initialize temporary storage for one input frame. */
+	if (init_input_frame(&input_frame))
+		goto cleanup;
+	/* Decode one frame worth of audio samples. */
+	if (decode_audio_frame(input_frame, input_format_context,
+		input_codec_context, &data_present, finished))
+		goto cleanup;
+	/* If we are at the end of the file and there are no more samples
+	* in the decoder which are delayed, we are actually finished.
+	* This must not be treated as an error. */
+	if (*finished) {
+		ret = 0;
+		goto cleanup;
+	}
+	/* If there is decoded data, convert and store it. */
+	if (data_present) {
+		/* Initialize the temporary storage for the converted input samples. */
+		if (init_converted_samples(&converted_input_samples, output_codec_context,
+			input_frame->nb_samples))
+			goto cleanup;
+
+		/* Convert the input samples to the desired output sample format.
+		* This requires a temporary storage provided by converted_input_samples. */
+		if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples,
+			input_frame->nb_samples, resampler_context))
+			goto cleanup;
+
+		/* Add the converted input samples to the FIFO buffer for later processing. */
+		if (add_samples_to_fifo(fifo, converted_input_samples,
+			input_frame->nb_samples))
+			goto cleanup;
+	}
+	/* Reached whenever decoding succeeded, with or without new data
+	* (the redundant duplicate assignment inside the branch above was
+	* removed). */
+	ret = 0;
+
+cleanup:
+	if (converted_input_samples) {
+		av_freep(&converted_input_samples[0]);
+		free(converted_input_samples);
+	}
+	av_frame_free(&input_frame);
+
+	return ret;
+}
+
+/**
+* Initialize one input frame for writing to the output file.
+* The frame will be exactly frame_size samples large.
+* @param[out] frame                Frame to be initialized
+* @param      output_codec_context Codec context of the output file
+* @param      frame_size           Size of the frame
+* @return Error code (0 if successful)
+*/
+static int init_output_frame(AVFrame **frame,
+	AVCodecContext *output_codec_context,
+	int frame_size)
+{
+	int error;
+
+	/* Create a new frame to store the audio samples. */
+	if (!(*frame = av_frame_alloc())) {
+		fprintf(stderr, "Could not allocate output frame\n");
+		return AVERROR_EXIT;
+	}
+
+	/* Set the frame's parameters, especially its size and format.
+	* av_frame_get_buffer needs this to allocate memory for the
+	* audio samples of the frame.
+	* Default channel layouts based on the number of channels
+	* are assumed for simplicity. */
+	(*frame)->nb_samples = frame_size;
+	(*frame)->channel_layout = output_codec_context->channel_layout;
+	(*frame)->format = output_codec_context->sample_fmt;
+	(*frame)->sample_rate = output_codec_context->sample_rate;
+
+	/* Allocate the samples of the created frame. This call will make
+	* sure that the audio frame can hold as many samples as specified.
+	* On failure *frame is freed and reset to NULL for the caller. */
+	if ((error = av_frame_get_buffer(*frame, 0)) < 0) {
+		fprintf(stderr, "Could not allocate output frame samples (error '%s')\n",
+			av_err2str(error));
+		av_frame_free(frame);
+		return error;
+	}
+
+	return 0;
+}
+
+/* Global timestamp for the audio frames. */
+static int64_t pts = 0;
+
+/**
+* Encode one frame worth of audio to the output file.
+* @param      frame                 Samples to be encoded
+* @param      output_format_context Format context of the output file
+* @param      output_codec_context  Codec context of the output file
+* @param[out] data_present          Indicates whether data has been
+*                                   encoded
+* @return Error code (0 if successful)
+*/
+static int encode_audio_frame(AVFrame *frame,
+	AVFormatContext *output_format_context,
+	AVCodecContext *output_codec_context,
+	int *data_present)
+{
+	/* Packet used for temporary storage. */
+	AVPacket output_packet;
+	int error;
+	init_packet(&output_packet);
+
+	/* BUGFIX: initialize the output flag so callers that do not
+	* pre-clear it never read an indeterminate value. */
+	*data_present = 0;
+
+	/* Set a timestamp based on the sample rate for the container.
+	* A NULL frame means "flush the encoder". */
+	if (frame) {
+		frame->pts = pts;
+		pts += frame->nb_samples;
+	}
+
+	/* Send the audio frame stored in the temporary packet to the encoder.
+	* The output audio stream encoder is used to do this. */
+	error = avcodec_send_frame(output_codec_context, frame);
+	/* The encoder signals that it has nothing more to encode. */
+	if (error == AVERROR_EOF) {
+		error = 0;
+		goto cleanup;
+	}
+	else if (error < 0) {
+		fprintf(stderr, "Could not send packet for encoding (error '%s')\n",
+			av_err2str(error));
+		/* BUGFIX: exit through cleanup instead of returning directly,
+		* so the packet is always unreffed on every path. */
+		goto cleanup;
+	}
+
+	/* Receive one encoded frame from the encoder. */
+	error = avcodec_receive_packet(output_codec_context, &output_packet);
+	/* If the encoder asks for more data to be able to provide an
+	* encoded frame, return indicating that no data is present. */
+	if (error == AVERROR(EAGAIN)) {
+		error = 0;
+		goto cleanup;
+		/* If the last frame has been encoded, stop encoding. */
+	}
+	else if (error == AVERROR_EOF) {
+		error = 0;
+		goto cleanup;
+	}
+	else if (error < 0) {
+		fprintf(stderr, "Could not encode frame (error '%s')\n",
+			av_err2str(error));
+		goto cleanup;
+		/* Default case: Return encoded data. */
+	}
+	else {
+		*data_present = 1;
+	}
+
+	/* Write one audio frame from the temporary packet to the output file. */
+	if (*data_present &&
+		(error = av_write_frame(output_format_context, &output_packet)) < 0) {
+		fprintf(stderr, "Could not write frame (error '%s')\n",
+			av_err2str(error));
+		goto cleanup;
+	}
+
+cleanup:
+	av_packet_unref(&output_packet);
+	return error;
+}
+
+/**
+* Load one audio frame from the FIFO buffer, encode and write it to the
+* output file.
+* @param fifo                  Buffer used for temporary storage
+* @param output_format_context Format context of the output file
+* @param output_codec_context  Codec context of the output file
+* @return Error code (0 if successful)
+*/
+static int load_encode_and_write(AVAudioFifo *fifo,
+	AVFormatContext *output_format_context,
+	AVCodecContext *output_codec_context)
+{
+	/* Temporary storage of the output samples of the frame written to the file. */
+	AVFrame *output_frame;
+	/* Use the maximum number of possible samples per frame.
+	* If there is less than the maximum possible frame size in the FIFO
+	* buffer use this number. Otherwise, use the maximum possible frame size. */
+	const int frame_size = FFMIN(av_audio_fifo_size(fifo),
+		output_codec_context->frame_size);
+	/* BUGFIX: was declared uninitialized before being handed to
+	* encode_audio_frame(). */
+	int data_written = 0;
+
+	/* Initialize temporary storage for one output frame. */
+	if (init_output_frame(&output_frame, output_codec_context, frame_size))
+		return AVERROR_EXIT;
+
+	/* Read as many samples from the FIFO buffer as required to fill the frame.
+	* The samples are stored in the frame temporarily. */
+	if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
+		fprintf(stderr, "Could not read data from FIFO\n");
+		av_frame_free(&output_frame);
+		return AVERROR_EXIT;
+	}
+
+	/* Encode one frame worth of audio samples. */
+	if (encode_audio_frame(output_frame, output_format_context,
+		output_codec_context, &data_written)) {
+		av_frame_free(&output_frame);
+		return AVERROR_EXIT;
+	}
+	av_frame_free(&output_frame);
+	return 0;
+}
+
+/**
+* Write the trailer of the output file container.
+* @param output_format_context Format context of the output file
+* @return Error code (0 if successful)
+*/
+static int write_output_file_trailer(AVFormatContext *output_format_context)
+{
+	/* Finalize the container; must be the last muxing call. */
+	const int error = av_write_trailer(output_format_context);
+	if (error >= 0)
+		return 0;
+
+	fprintf(stderr, "Could not write output file trailer (error '%s')\n",
+		av_err2str(error));
+	return error;
+}
+
+/**
+* Demo/test entry point: transcode the hard-coded WAV input
+* "WAS_2019-09-05_14_19_42_109.wav" to "transcode.aac" using the
+* decode -> resample -> FIFO -> encode pipeline above (based on
+* FFmpeg's transcode_aac example).
+* @return 0 on success, a negative AVERROR / AVERROR_EXIT on failure.
+*/
+int test_transcode()
+{
+	AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
+	AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
+	SwrContext *resample_context = NULL;
+	AVAudioFifo *fifo = NULL;
+	int ret = AVERROR_EXIT;
+
+	//if (argc != 3) {
+	//	fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
+	//	exit(1);
+	//}
+
+	const char *inputfile = "WAS_2019-09-05_14_19_42_109.wav";
+	const char *outputfile = "transcode.aac";
+
+	/* Open the input file for reading. */
+	if (open_input_file(inputfile, &input_format_context,
+		&input_codec_context))
+		goto cleanup;
+	/* Open the output file for writing. */
+	if (open_output_file(outputfile, input_codec_context,
+		&output_format_context, &output_codec_context))
+		goto cleanup;
+	/* Initialize the resampler to be able to convert audio sample formats. */
+	if (init_resampler(input_codec_context, output_codec_context,
+		&resample_context))
+		goto cleanup;
+	/* Initialize the FIFO buffer to store audio samples to be encoded. */
+	if (init_fifo(&fifo, output_codec_context))
+		goto cleanup;
+	/* Write the header of the output file container. */
+	if (write_output_file_header(output_format_context))
+		goto cleanup;
+
+	/* Loop as long as we have input samples to read or output samples
+	* to write; abort as soon as we have neither. */
+	while (1) {
+		/* Use the encoder's desired frame size for processing. */
+		const int output_frame_size = output_codec_context->frame_size;
+		int finished = 0;
+
+		/* Make sure that there is one frame worth of samples in the FIFO
+		* buffer so that the encoder can do its work.
+		* Since the decoder's and the encoder's frame size may differ, we
+		* need to FIFO buffer to store as many frames worth of input samples
+		* that they make up at least one frame worth of output samples. */
+		while (av_audio_fifo_size(fifo) < output_frame_size) {
+			/* Decode one frame worth of audio samples, convert it to the
+			* output sample format and put it into the FIFO buffer. */
+			if (read_decode_convert_and_store(fifo, input_format_context,
+				input_codec_context,
+				output_codec_context,
+				resample_context, &finished))
+				goto cleanup;
+
+			/* If we are at the end of the input file, we continue
+			* encoding the remaining audio samples to the output file. */
+			if (finished)
+				break;
+		}
+
+		/* If we have enough samples for the encoder, we encode them.
+		* At the end of the file, we pass the remaining samples to
+		* the encoder. */
+		while (av_audio_fifo_size(fifo) >= output_frame_size ||
+			(finished && av_audio_fifo_size(fifo) > 0))
+			/* Take one frame worth of audio samples from the FIFO buffer,
+			* encode it and write it to the output file. */
+			if (load_encode_and_write(fifo, output_format_context,
+				output_codec_context))
+				goto cleanup;
+
+		/* If we are at the end of the input file and have encoded
+		* all remaining samples, we can exit this loop and finish. */
+		if (finished) {
+			int data_written;
+			/* Flush the encoder as it may have delayed frames. */
+			do {
+				data_written = 0;
+				if (encode_audio_frame(NULL, output_format_context,
+					output_codec_context, &data_written))
+					goto cleanup;
+			} while (data_written);
+			break;
+		}
+	}
+
+	/* Write the trailer of the output file container. */
+	if (write_output_file_trailer(output_format_context))
+		goto cleanup;
+	ret = 0;
+
+cleanup:
+	/* Release everything in reverse order of creation; each pointer is
+	* checked so partially-initialized runs clean up safely. */
+	if (fifo)
+		av_audio_fifo_free(fifo);
+	swr_free(&resample_context);
+	if (output_codec_context)
+		avcodec_free_context(&output_codec_context);
+	if (output_format_context) {
+		avio_closep(&output_format_context->pb);
+		avformat_free_context(output_format_context);
+	}
+	if (input_codec_context)
+		avcodec_free_context(&input_codec_context);
+	if (input_format_context)
+		avformat_close_input(&input_format_context);
+
+	return ret;
+
+}
+
+/**
+* Former standalone entry point. Its body was a byte-for-byte copy of
+* test_transcode() above (both hard-code the same input/output files
+* and never use argc/argv -- the argument check was commented out), so
+* it now delegates instead of duplicating ~100 lines of pipeline code.
+* @param argc Unused.
+* @param argv Unused.
+* @return 0 on success, a negative AVERROR / AVERROR_EXIT on failure.
+*/
+int main1(int argc, char **argv)
+{
+	(void)argc;
+	(void)argv;
+	return test_transcode();
+}

+ 66 - 0
libs/Recorder/utils_string.cpp

@@ -0,0 +1,66 @@
+#include "utils_string.h"
+
+#ifdef WIN32
+
+#include <Windows.h>
+
+#endif
+
+namespace am {
+
+	std::wstring utils_string::ascii_unicode(const std::string & str)
+	{
+		int unicodeLen = MultiByteToWideChar(CP_ACP, 0, str.c_str(), -1, nullptr, 0);
+
+		wchar_t *pUnicode = (wchar_t*)malloc(sizeof(wchar_t)*unicodeLen);
+
+		MultiByteToWideChar(CP_ACP, 0, str.c_str(), -1, pUnicode, unicodeLen);
+
+		std::wstring ret_str = pUnicode;
+
+		free(pUnicode);
+
+		return ret_str;
+	}
+
+	std::string utils_string::unicode_ascii(const std::wstring & wstr)
+	{
+		int ansiiLen = WideCharToMultiByte(CP_ACP, 0, wstr.c_str(), -1, nullptr, 0, nullptr, nullptr);
+		char *pAssii = (char*)malloc(sizeof(char)*ansiiLen);
+		WideCharToMultiByte(CP_ACP, 0, wstr.c_str(), -1, pAssii, ansiiLen, nullptr, nullptr);
+		std::string ret_str = pAssii;
+		free(pAssii);
+		return ret_str;
+	}
+
+	std::string utils_string::ascii_utf8(const std::string & str)
+	{
+		return unicode_utf8(ascii_unicode(str));
+	}
+
+	std::string utils_string::utf8_ascii(const std::string & utf8)
+	{
+		return unicode_ascii(utf8_unicode(utf8));
+	}
+
+	std::string utils_string::unicode_utf8(const std::wstring & wstr)
+	{
+		int ansiiLen = WideCharToMultiByte(CP_UTF8, 0, wstr.c_str(), -1, nullptr, 0, nullptr, nullptr);
+		char *pAssii = (char*)malloc(sizeof(char)*ansiiLen);
+		WideCharToMultiByte(CP_UTF8, 0, wstr.c_str(), -1, pAssii, ansiiLen, nullptr, nullptr);
+		std::string ret_str = pAssii;
+		free(pAssii);
+		return ret_str;
+	}
+
+	std::wstring utils_string::utf8_unicode(const std::string & utf8)
+	{
+		int unicodeLen = MultiByteToWideChar(CP_UTF8, 0, utf8.c_str(), -1, nullptr, 0);
+		wchar_t *pUnicode = (wchar_t*)malloc(sizeof(wchar_t)*unicodeLen);
+		MultiByteToWideChar(CP_UTF8, 0, utf8.c_str(), -1, pUnicode, unicodeLen);
+		std::wstring ret_str = pUnicode;
+		free(pUnicode);
+		return ret_str;
+	}
+
+}

+ 23 - 0
libs/Recorder/utils_string.h

@@ -0,0 +1,23 @@
+#pragma once
+
+#include <string>
+
+namespace am {
+
	// Static helpers for converting between the Windows ANSI code page
	// (CP_ACP), UTF-16 ("unicode") and UTF-8 string encodings. Implemented
	// in utils_string.cpp on top of MultiByteToWideChar/WideCharToMultiByte,
	// so these are Windows-only.
	class utils_string
	{
	public:
		// ANSI (system code page) string -> UTF-16 wide string.
		static std::wstring ascii_unicode(const std::string & str);

		// UTF-16 wide string -> ANSI (system code page) string.
		static std::string unicode_ascii(const std::wstring &wstr);

		// ANSI -> UTF-8 (pivots through UTF-16).
		static std::string ascii_utf8(const std::string & str);

		// UTF-8 -> ANSI (pivots through UTF-16).
		static std::string utf8_ascii(const std::string &utf8);

		// UTF-16 wide string -> UTF-8 string.
		static std::string  unicode_utf8(const std::wstring& wstr);

		// UTF-8 string -> UTF-16 wide string.
		static std::wstring utf8_unicode(const std::string &utf8);
	};
+
+}