/*
 * An example showing how to play a stream sync'd to video, using ffmpeg.
 *
 * Requires C++14.
 */

#include <condition_variable>
#include <functional>
#include <algorithm>
#include <iostream>
#include <utility>
#include <iomanip>
#include <cstdint>
#include <cstring>
#include <cstdlib>
#include <atomic>
#include <cerrno>
#include <chrono>
#include <cstdio>
#include <future>
#include <memory>
#include <string>
#include <thread>
#include <vector>
#include <array>
#include <cmath>
#include <deque>
#include <mutex>
#include <ratio>

#ifdef __GNUC__
_Pragma("GCC diagnostic push")
_Pragma("GCC diagnostic ignored \"-Wconversion\"")
_Pragma("GCC diagnostic ignored \"-Wold-style-cast\"")
#endif
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavformat/version.h"
#include "libavutil/avutil.h"
#include "libavutil/error.h"
#include "libavutil/frame.h"
#include "libavutil/mem.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/version.h"
#include "libavutil/channel_layout.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"

constexpr auto AVNoPtsValue = AV_NOPTS_VALUE;
constexpr auto AVErrorEOF = AVERROR_EOF;

struct SwsContext;
}

#include "SDL.h"
#ifdef __GNUC__
_Pragma("GCC diagnostic pop")
#endif

#include "AL/alc.h"
#include "AL/al.h"
#include "AL/alext.h"

#include "common/alhelpers.h"


namespace {

inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept
{ return static_cast<int64_t>(n); }

#ifndef M_PI
#define M_PI (3.14159265358979323846)
#endif

using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1_i64<<32)>>;
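/* fixed32 is a 32.32 fixed-point value expressed as a std::chrono duration
 * with 2^32 ticks per second. AL_SAMPLE_OFFSET_LATENCY_SOFT reports source
 * offsets in this format (whole samples in the high 32 bits, a fractional
 * sample in the low 32), so for example an offset of (3_i64<<32) represents
 * exactly 3 samples; dividing by the sample rate and duration_cast'ing to
 * nanoseconds converts it to a time position.
 */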
using nanoseconds = std::chrono::nanoseconds;
using microseconds = std::chrono::microseconds;
using milliseconds = std::chrono::milliseconds;
using seconds = std::chrono::seconds;
using seconds_d64 = std::chrono::duration<double>;
using std::chrono::duration_cast;

const std::string AppName{"alffplay"};

ALenum DirectOutMode{AL_FALSE};
bool EnableWideStereo{false};
bool EnableUhj{false};
bool EnableSuperStereo{false};
bool DisableVideo{false};
LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
LPALEVENTCONTROLSOFT alEventControlSOFT;
LPALEVENTCALLBACKSOFT alEventCallbackSOFT;
LPALBUFFERCALLBACKSOFT alBufferCallbackSOFT;

const seconds AVNoSyncThreshold{10};

#define VIDEO_PICTURE_QUEUE_SIZE 24

const seconds_d64 AudioSyncThreshold{0.03};
const milliseconds AudioSampleCorrectionMax{50};
/* Averaging filter coefficient for audio sync. */
#define AUDIO_DIFF_AVG_NB 20
const double AudioAvgFilterCoeff{std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB)};
/* Per-buffer size, in time */
constexpr milliseconds AudioBufferTime{20};
/* Buffer total size, in time (should be divisible by the buffer time) */
constexpr milliseconds AudioBufferTotalTime{800};
constexpr auto AudioBufferCount = AudioBufferTotalTime / AudioBufferTime;
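/* With the values above, AudioBufferCount = 800ms / 20ms = 40 buffers of
 * 20ms each, giving the source up to 800ms of queued audio when the
 * non-callback path is used.
 */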
enum {
    FF_MOVIE_DONE_EVENT = SDL_USEREVENT
};

enum class SyncMaster {
    Audio,
    Video,
    External,

    Default = Audio
};


inline microseconds get_avtime()
{ return microseconds{av_gettime()}; }

/* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
struct AVIOContextDeleter {
    void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
};
using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;

struct AVFormatCtxDeleter {
    void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
};
using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;

struct AVCodecCtxDeleter {
    void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
};
using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;

struct AVPacketDeleter {
    void operator()(AVPacket *pkt) { av_packet_free(&pkt); }
};
using AVPacketPtr = std::unique_ptr<AVPacket,AVPacketDeleter>;

struct AVFrameDeleter {
    void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
};
using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;

struct SwrContextDeleter {
    void operator()(SwrContext *ptr) { swr_free(&ptr); }
};
using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;

struct SwsContextDeleter {
    void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
};
using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;


template<size_t SizeLimit>
class DataQueue {
    std::mutex mPacketMutex, mFrameMutex;
    std::condition_variable mPacketCond;
    std::condition_variable mInFrameCond, mOutFrameCond;

    std::deque<AVPacketPtr> mPackets;
    size_t mTotalSize{0};
    bool mFinished{false};

    AVPacketPtr getPacket()
    {
        std::unique_lock<std::mutex> plock{mPacketMutex};
        while(mPackets.empty() && !mFinished)
            mPacketCond.wait(plock);
        if(mPackets.empty())
            return nullptr;

        auto ret = std::move(mPackets.front());
        mPackets.pop_front();
        mTotalSize -= static_cast<unsigned int>(ret->size);
        return ret;
    }

public:
    int sendPacket(AVCodecContext *codecctx)
    {
        AVPacketPtr packet{getPacket()};

        int ret{};
        {
            std::unique_lock<std::mutex> flock{mFrameMutex};
            while((ret=avcodec_send_packet(codecctx, packet.get())) == AVERROR(EAGAIN))
                mInFrameCond.wait_for(flock, milliseconds{50});
        }
        mOutFrameCond.notify_one();

        if(!packet)
        {
            if(!ret) return AVErrorEOF;
            std::cerr<< "Failed to send flush packet: "<<ret <<std::endl;
            return ret;
        }
        if(ret < 0)
            std::cerr<< "Failed to send packet: "<<ret <<std::endl;
        return ret;
    }

    int receiveFrame(AVCodecContext *codecctx, AVFrame *frame)
    {
        int ret{};
        {
            std::unique_lock<std::mutex> flock{mFrameMutex};
            while((ret=avcodec_receive_frame(codecctx, frame)) == AVERROR(EAGAIN))
                mOutFrameCond.wait_for(flock, milliseconds{50});
        }
        mInFrameCond.notify_one();
        return ret;
    }

    void setFinished()
    {
        {
            std::lock_guard<std::mutex> _{mPacketMutex};
            mFinished = true;
        }
        mPacketCond.notify_one();
    }

    void flush()
    {
        {
            std::lock_guard<std::mutex> _{mPacketMutex};
            mFinished = true;

            mPackets.clear();
            mTotalSize = 0;
        }
        mPacketCond.notify_one();
    }

    bool put(const AVPacket *pkt)
    {
        {
            std::unique_lock<std::mutex> lock{mPacketMutex};
            if(mTotalSize >= SizeLimit || mFinished)
                return false;

            mPackets.push_back(AVPacketPtr{av_packet_alloc()});
            if(av_packet_ref(mPackets.back().get(), pkt) != 0)
            {
                mPackets.pop_back();
                return true;
            }

            mTotalSize += static_cast<unsigned int>(mPackets.back()->size);
        }
        mPacketCond.notify_one();
        return true;
    }
};
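/* DataQueue usage, roughly: the parser thread calls put() to queue demuxed
 * packets (put() returns false once SizeLimit bytes are held, so the parser
 * backs off), then setFinished() when the stream ends. On the decoder side,
 * one task loops sendPacket() to feed the codec until it returns AVErrorEOF,
 * while the decode thread loops receiveFrame() to pull decoded frames; the
 * frame mutex and condvars serialize codec access between the two.
 */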
struct MovieState;

struct AudioState {
    MovieState &mMovie;

    AVStream *mStream{nullptr};
    AVCodecCtxPtr mCodecCtx;

    DataQueue<2*1024*1024> mQueue;

    /* Used for clock difference average computation */
    seconds_d64 mClockDiffAvg{0};

    /* Time of the next sample to be buffered */
    nanoseconds mCurrentPts{0};

    /* Device clock time that the stream started at. */
    nanoseconds mDeviceStartTime{nanoseconds::min()};

    /* Decompressed sample frame, and swresample context for conversion */
    AVFramePtr mDecodedFrame;
    SwrContextPtr mSwresCtx;

    /* Conversion format, for what gets fed to OpenAL */
    uint64_t mDstChanLayout{0};
    AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};

    /* Storage of converted samples */
    uint8_t *mSamples{nullptr};
    int mSamplesLen{0}; /* In samples */
    int mSamplesPos{0};
    int mSamplesMax{0};

    std::unique_ptr<uint8_t[]> mBufferData;
    size_t mBufferDataSize{0};
    std::atomic<size_t> mReadPos{0};
    std::atomic<size_t> mWritePos{0};

    /* OpenAL format */
    ALenum mFormat{AL_NONE};
    ALuint mFrameSize{0};

    std::mutex mSrcMutex;
    std::condition_variable mSrcCond;
    std::atomic_flag mConnected;
    ALuint mSource{0};
    std::array<ALuint,AudioBufferCount> mBuffers{};
    ALuint mBufferIdx{0};

    AudioState(MovieState &movie) : mMovie(movie)
    { mConnected.test_and_set(std::memory_order_relaxed); }
    ~AudioState()
    {
        if(mSource)
            alDeleteSources(1, &mSource);
        if(mBuffers[0])
            alDeleteBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());

        av_freep(&mSamples);
    }

    static void AL_APIENTRY eventCallbackC(ALenum eventType, ALuint object, ALuint param,
        ALsizei length, const ALchar *message, void *userParam)
    { static_cast<AudioState*>(userParam)->eventCallback(eventType, object, param, length, message); }
    void eventCallback(ALenum eventType, ALuint object, ALuint param, ALsizei length,
        const ALchar *message);

    static ALsizei AL_APIENTRY bufferCallbackC(void *userptr, void *data, ALsizei size)
    { return static_cast<AudioState*>(userptr)->bufferCallback(data, size); }
    ALsizei bufferCallback(void *data, ALsizei size);

    nanoseconds getClockNoLock();
    nanoseconds getClock()
    {
        std::lock_guard<std::mutex> lock{mSrcMutex};
        return getClockNoLock();
    }

    bool startPlayback();

    int getSync();
    int decodeFrame();
    bool readAudio(uint8_t *samples, unsigned int length, int &sample_skip);
    bool readAudio(int sample_skip);

    int handler();
};

struct VideoState {
    MovieState &mMovie;

    AVStream *mStream{nullptr};
    AVCodecCtxPtr mCodecCtx;

    DataQueue<14*1024*1024> mQueue;

    /* The pts of the currently displayed frame, and the time (av_gettime) it
     * was last updated - used to have running video pts
     */
    nanoseconds mDisplayPts{0};
    microseconds mDisplayPtsTime{microseconds::min()};
    std::mutex mDispPtsMutex;

    /* Swscale context for format conversion */
    SwsContextPtr mSwscaleCtx;

    struct Picture {
        AVFramePtr mFrame{};
        nanoseconds mPts{nanoseconds::min()};
    };
    std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
    std::atomic<size_t> mPictQRead{0u}, mPictQWrite{1u};
    std::mutex mPictQMutex;
    std::condition_variable mPictQCond;

    SDL_Texture *mImage{nullptr};
    int mWidth{0}, mHeight{0}; /* Full texture size */
    bool mFirstUpdate{true};

    std::atomic<bool> mEOS{false};
    std::atomic<bool> mFinalUpdate{false};

    VideoState(MovieState &movie) : mMovie(movie) { }
    ~VideoState()
    {
        if(mImage)
            SDL_DestroyTexture(mImage);
        mImage = nullptr;
    }

    nanoseconds getClock();

    void display(SDL_Window *screen, SDL_Renderer *renderer, AVFrame *frame);
    void updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw);
    int handler();
};

struct MovieState {
    AVIOContextPtr mIOContext;
    AVFormatCtxPtr mFormatCtx;

    SyncMaster mAVSyncType{SyncMaster::Default};

    microseconds mClockBase{microseconds::min()};

    std::atomic<bool> mQuit{false};

    AudioState mAudio;
    VideoState mVideo;

    std::mutex mStartupMutex;
    std::condition_variable mStartupCond;
    bool mStartupDone{false};

    std::thread mParseThread;
    std::thread mAudioThread;
    std::thread mVideoThread;

    std::string mFilename;

    MovieState(std::string fname)
      : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
    { }
    ~MovieState()
    {
        stop();
        if(mParseThread.joinable())
            mParseThread.join();
    }

    static int decode_interrupt_cb(void *ctx);
    bool prepare();
    void setTitle(SDL_Window *window);
    void stop();

    nanoseconds getClock();
    nanoseconds getMasterClock();
    nanoseconds getDuration();

    int streamComponentOpen(unsigned int stream_index);
    int parse_handler();
};
nanoseconds AudioState::getClockNoLock()
{
    // The audio clock is the timestamp of the sample currently being heard.
    if(alcGetInteger64vSOFT)
    {
        // If device start time = min, we aren't playing yet.
        if(mDeviceStartTime == nanoseconds::min())
            return nanoseconds::zero();

        // Get the current device clock time and latency.
        auto device = alcGetContextsDevice(alcGetCurrentContext());
        ALCint64SOFT devtimes[2]{0,0};
        alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
        auto latency = nanoseconds{devtimes[1]};
        auto device_time = nanoseconds{devtimes[0]};

        // The clock is simply the current device time relative to the recorded
        // start time. We can also subtract the latency to get a more accurate
        // position of where the audio device actually is in the output stream.
        return device_time - mDeviceStartTime - latency;
    }

    if(mBufferDataSize > 0)
    {
        if(mDeviceStartTime == nanoseconds::min())
            return nanoseconds::zero();

        /* With a callback buffer and no device clock, mDeviceStartTime is
         * actually the timestamp of the first sample frame played. The audio
         * clock, then, is that plus the current source offset.
         */
        ALint64SOFT offset[2];
        if(alGetSourcei64vSOFT)
            alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
        else
        {
            ALint ioffset;
            alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
            offset[0] = ALint64SOFT{ioffset} << 32;
            offset[1] = 0;
        }
        /* NOTE: The source state must be checked last, in case an underrun
         * occurs and the source stops between getting the state and retrieving
         * the offset+latency.
         */
        ALint status;
        alGetSourcei(mSource, AL_SOURCE_STATE, &status);

        nanoseconds pts{};
        if(status == AL_PLAYING || status == AL_PAUSED)
            pts = mDeviceStartTime - nanoseconds{offset[1]} +
                duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
        else
        {
            /* If the source is stopped, the pts of the next sample to be heard
             * is the pts of the next sample to be buffered, minus the amount
             * already in the buffer ready to play.
             */
            const size_t woffset{mWritePos.load(std::memory_order_acquire)};
            const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
            const size_t readable{((woffset >= roffset) ? woffset : (mBufferDataSize+woffset)) -
                roffset};

            pts = mCurrentPts - nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate;
        }

        return pts;
    }

    /* The source-based clock is based on 4 components:
     * 1 - The timestamp of the next sample to buffer (mCurrentPts)
     * 2 - The length of the source's buffer queue
     *     (AudioBufferTime*AL_BUFFERS_QUEUED)
     * 3 - The offset OpenAL is currently at in the source (the first value
     *     from AL_SAMPLE_OFFSET_LATENCY_SOFT)
     * 4 - The latency between OpenAL and the DAC (the second value from
     *     AL_SAMPLE_OFFSET_LATENCY_SOFT)
     *
     * Subtracting the length of the source queue from the next sample's
     * timestamp gives the timestamp of the sample at the start of the source
     * queue. Adding the source offset to that results in the timestamp for the
     * sample at OpenAL's current position, and subtracting the source latency
     * from that gives the timestamp of the sample currently at the DAC.
     */
    nanoseconds pts{mCurrentPts};
    if(mSource)
    {
        ALint64SOFT offset[2];
        if(alGetSourcei64vSOFT)
            alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
        else
        {
            ALint ioffset;
            alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
            offset[0] = ALint64SOFT{ioffset} << 32;
            offset[1] = 0;
        }
        ALint queued, status;
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        alGetSourcei(mSource, AL_SOURCE_STATE, &status);

        /* If the source is AL_STOPPED, then there was an underrun and all
         * buffers are processed, so ignore the source queue. The audio thread
         * will put the source into an AL_INITIAL state and clear the queue
         * when it starts recovery.
         */
        if(status != AL_STOPPED)
        {
            pts -= AudioBufferTime*queued;
            pts += duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
        }
        /* Don't offset by the latency if the source isn't playing. */
        if(status == AL_PLAYING)
            pts -= nanoseconds{offset[1]};
    }

    return std::max(pts, nanoseconds::zero());
}
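/* A worked example of the source-based clock, assuming a 48kHz stream: with
 * mCurrentPts = 1000ms, 40 20ms buffers queued, a source offset of 9600
 * samples (200ms), and 10ms of reported latency, the sample at the DAC is at
 * 1000ms - 800ms + 200ms - 10ms = 390ms into the stream.
 */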
bool AudioState::startPlayback()
{
    const size_t woffset{mWritePos.load(std::memory_order_acquire)};
    const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
    const size_t readable{((woffset >= roffset) ? woffset : (mBufferDataSize+woffset)) -
        roffset};

    if(mBufferDataSize > 0)
    {
        if(readable == 0)
            return false;
        if(!alcGetInteger64vSOFT)
            mDeviceStartTime = mCurrentPts -
                nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate;
    }
    else
    {
        ALint queued{};
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        if(queued == 0) return false;
    }

    alSourcePlay(mSource);
    if(alcGetInteger64vSOFT)
    {
        /* Subtract the total buffer queue time from the current pts to get the
         * pts of the start of the queue.
         */
        int64_t srctimes[2]{0,0};
        alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
        auto device_time = nanoseconds{srctimes[1]};
        auto src_offset = duration_cast<nanoseconds>(fixed32{srctimes[0]}) /
            mCodecCtx->sample_rate;

        /* The mixer may have ticked and incremented the device time and sample
         * offset, so subtract the source offset from the device time to get
         * the device time the source started at. Also subtract startpts to get
         * the device time the stream would have started at to reach where it
         * is now.
         */
        if(mBufferDataSize > 0)
        {
            nanoseconds startpts{mCurrentPts -
                nanoseconds{seconds{readable/mFrameSize}}/mCodecCtx->sample_rate};
            mDeviceStartTime = device_time - src_offset - startpts;
        }
        else
        {
            nanoseconds startpts{mCurrentPts - AudioBufferTotalTime};
            mDeviceStartTime = device_time - src_offset - startpts;
        }
    }
    return true;
}

int AudioState::getSync()
{
    if(mMovie.mAVSyncType == SyncMaster::Audio)
        return 0;

    auto ref_clock = mMovie.getMasterClock();
    auto diff = ref_clock - getClockNoLock();

    if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
    {
        /* Difference is TOO big; reset accumulated average */
        mClockDiffAvg = seconds_d64::zero();
        return 0;
    }

    /* Accumulate the diffs */
    mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
    auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
    if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
        return 0;

    /* Constrain the per-update difference to avoid exceedingly large skips */
    diff = std::min<nanoseconds>(diff, AudioSampleCorrectionMax);
    return static_cast<int>(duration_cast<seconds>(diff*mCodecCtx->sample_rate).count());
}
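/* The filter above is an exponential moving average: with
 * AudioAvgFilterCoeff = 0.01^(1/20) ~= 0.794, a constant clock error needs
 * roughly AUDIO_DIFF_AVG_NB (20) updates before ~99% of it shows up in
 * avg_diff, so a single noisy measurement won't trigger a correction. The
 * return value converts the remaining time difference into a count of
 * sample frames to skip (positive) or duplicate (negative).
 */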
int AudioState::decodeFrame()
{
    do {
        while(int ret{mQueue.receiveFrame(mCodecCtx.get(), mDecodedFrame.get())})
        {
            if(ret == AVErrorEOF) return 0;
            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
        }
    } while(mDecodedFrame->nb_samples <= 0);

    /* If provided, update w/ pts */
    if(mDecodedFrame->best_effort_timestamp != AVNoPtsValue)
        mCurrentPts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
            static_cast<double>(mDecodedFrame->best_effort_timestamp)});

    if(mDecodedFrame->nb_samples > mSamplesMax)
    {
        av_freep(&mSamples);
        av_samples_alloc(&mSamples, nullptr, mCodecCtx->channels, mDecodedFrame->nb_samples,
            mDstSampleFmt, 0);
        mSamplesMax = mDecodedFrame->nb_samples;
    }
    /* Return the amount of sample frames converted */
    int data_size{swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
        const_cast<const uint8_t**>(mDecodedFrame->data), mDecodedFrame->nb_samples)};

    av_frame_unref(mDecodedFrame.get());
    return data_size;
}

/* Duplicates the sample at in to out, count times. The frame size is a
 * multiple of the template type size.
 */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, size_t count, size_t frame_size)
{
    auto *sample = reinterpret_cast<const T*>(in);
    auto *dst = reinterpret_cast<T*>(out);

    /* NOTE: frame_size is a multiple of sizeof(T). */
    size_t type_mult{frame_size / sizeof(T)};
    if(type_mult == 1)
        std::fill_n(dst, count, *sample);
    else for(size_t i{0};i < count;++i)
    {
        for(size_t j{0};j < type_mult;++j)
            dst[i*type_mult + j] = sample[j];
    }
}

static void sample_dup(uint8_t *out, const uint8_t *in, size_t count, size_t frame_size)
{
    if((frame_size&7) == 0)
        sample_dup<uint64_t>(out, in, count, frame_size);
    else if((frame_size&3) == 0)
        sample_dup<uint32_t>(out, in, count, frame_size);
    else if((frame_size&1) == 0)
        sample_dup<uint16_t>(out, in, count, frame_size);
    else
        sample_dup<uint8_t>(out, in, count, frame_size);
}
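/* The dispatcher above just picks the widest integer type that evenly
 * divides the frame size: a stereo float frame (8 bytes) is duplicated as
 * one uint64_t per frame, a mono 16-bit frame as one uint16_t. The result
 * is byte-for-byte the same either way; only the copy width differs.
 */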
bool AudioState::readAudio(uint8_t *samples, unsigned int length, int &sample_skip)
{
    unsigned int audio_size{0};

    /* Read the next chunk of data, refill the buffer, and queue it
     * on the source */
    length /= mFrameSize;
    while(mSamplesLen > 0 && audio_size < length)
    {
        unsigned int rem{length - audio_size};
        if(mSamplesPos >= 0)
        {
            const auto len = static_cast<unsigned int>(mSamplesLen - mSamplesPos);
            if(rem > len) rem = len;
            std::copy_n(mSamples + static_cast<unsigned int>(mSamplesPos)*mFrameSize,
                rem*mFrameSize, samples);
        }
        else
        {
            rem = std::min(rem, static_cast<unsigned int>(-mSamplesPos));

            /* Add samples by copying the first sample */
            sample_dup(samples, mSamples, rem, mFrameSize);
        }

        mSamplesPos += rem;
        mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
        samples += rem*mFrameSize;
        audio_size += rem;

        while(mSamplesPos >= mSamplesLen)
        {
            mSamplesLen = decodeFrame();
            mSamplesPos = std::min(mSamplesLen, sample_skip);
            if(mSamplesLen <= 0) break;

            sample_skip -= mSamplesPos;

            // Adjust the device start time and current pts by the amount we're
            // skipping/duplicating, so that the clock remains correct for the
            // current stream position.
            auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
            mDeviceStartTime -= skip;
            mCurrentPts += skip;
        }
    }
    if(audio_size <= 0)
        return false;

    if(audio_size < length)
    {
        const unsigned int rem{length - audio_size};
        std::fill_n(samples, rem*mFrameSize,
            (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
        mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
    }
    return true;
}

bool AudioState::readAudio(int sample_skip)
{
    size_t woffset{mWritePos.load(std::memory_order_acquire)};
    const size_t roffset{mReadPos.load(std::memory_order_relaxed)};
    while(mSamplesLen > 0)
    {
        const size_t nsamples{((roffset > woffset) ? roffset-woffset-1
            : (roffset == 0) ? (mBufferDataSize-woffset-1)
            : (mBufferDataSize-woffset)) / mFrameSize};
        if(!nsamples) break;

        if(mSamplesPos < 0)
        {
            const size_t rem{std::min<size_t>(nsamples, static_cast<ALuint>(-mSamplesPos))};

            sample_dup(&mBufferData[woffset], mSamples, rem, mFrameSize);
            woffset += rem * mFrameSize;
            if(woffset == mBufferDataSize) woffset = 0;
            mWritePos.store(woffset, std::memory_order_release);

            mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
            mSamplesPos += static_cast<int>(rem);
            continue;
        }

        const size_t rem{std::min<size_t>(nsamples, static_cast<ALuint>(mSamplesLen-mSamplesPos))};
        const size_t boffset{static_cast<ALuint>(mSamplesPos) * size_t{mFrameSize}};
        const size_t nbytes{rem * mFrameSize};

        memcpy(&mBufferData[woffset], mSamples + boffset, nbytes);
        woffset += nbytes;
        if(woffset == mBufferDataSize) woffset = 0;
        mWritePos.store(woffset, std::memory_order_release);

        mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
        mSamplesPos += static_cast<int>(rem);

        while(mSamplesPos >= mSamplesLen)
        {
            mSamplesLen = decodeFrame();
            mSamplesPos = std::min(mSamplesLen, sample_skip);
            if(mSamplesLen <= 0) return false;

            sample_skip -= mSamplesPos;

            auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
            mDeviceStartTime -= skip;
            mCurrentPts += skip;
        }
    }

    return true;
}


void AL_APIENTRY AudioState::eventCallback(ALenum eventType, ALuint object, ALuint param,
    ALsizei length, const ALchar *message)
{
    if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
    {
        /* Temporarily lock the source mutex to ensure it's not between
         * checking the processed count and going to sleep.
         */
        std::unique_lock<std::mutex>{mSrcMutex}.unlock();
        mSrcCond.notify_one();
        return;
    }

    std::cout<< "\n---- AL Event on AudioState "<<this<<" ----\nEvent: ";
    switch(eventType)
    {
    case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
    case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
    case AL_EVENT_TYPE_DISCONNECTED_SOFT: std::cout<< "Disconnected"; break;
    default:
        std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<std::dec<<
            std::setw(0)<<std::setfill(' '); break;
    }
    std::cout<< "\n"
        "Object ID: "<<object<<"\n"
        "Parameter: "<<param<<"\n"
        "Message: "<<std::string{message, static_cast<ALuint>(length)}<<"\n----"<<
        std::endl;

    if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
    {
        {
            std::lock_guard<std::mutex> lock{mSrcMutex};
            mConnected.clear(std::memory_order_release);
        }
        mSrcCond.notify_one();
    }
}

ALsizei AudioState::bufferCallback(void *data, ALsizei size)
{
    ALsizei got{0};

    size_t roffset{mReadPos.load(std::memory_order_acquire)};
    while(got < size)
    {
        const size_t woffset{mWritePos.load(std::memory_order_relaxed)};
        if(woffset == roffset) break;

        size_t todo{((woffset < roffset) ? mBufferDataSize : woffset) - roffset};
        todo = std::min<size_t>(todo, static_cast<ALuint>(size-got));

        memcpy(data, &mBufferData[roffset], todo);
        data = static_cast<ALbyte*>(data) + todo;
        got += static_cast<ALsizei>(todo);

        roffset += todo;
        if(roffset == mBufferDataSize)
            roffset = 0;
    }
    mReadPos.store(roffset, std::memory_order_release);

    return got;
}
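/* mBufferData acts as a single-producer/single-consumer ring buffer: the
 * decoder thread only advances mWritePos and the mixer's buffer callback
 * only advances mReadPos, so the acquire/release pairs above are the only
 * synchronization needed. Note that readAudio always leaves a little space
 * unwritten (the -1 terms in its nsamples computation), so that
 * woffset == roffset unambiguously means "empty" rather than "full".
 */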
int AudioState::handler()
{
    std::unique_lock<std::mutex> srclock{mSrcMutex, std::defer_lock};
    milliseconds sleep_time{AudioBufferTime / 3};

    struct EventControlManager {
        const std::array<ALenum,3> evt_types{{
            AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
            AL_EVENT_TYPE_DISCONNECTED_SOFT}};

        EventControlManager(milliseconds &sleep_time)
        {
            if(alEventControlSOFT)
            {
                alEventControlSOFT(static_cast<ALsizei>(evt_types.size()), evt_types.data(),
                    AL_TRUE);
                alEventCallbackSOFT(&AudioState::eventCallbackC, this);
                sleep_time = AudioBufferTotalTime;
            }
        }
        ~EventControlManager()
        {
            if(alEventControlSOFT)
            {
                alEventControlSOFT(static_cast<ALsizei>(evt_types.size()), evt_types.data(),
                    AL_FALSE);
                alEventCallbackSOFT(nullptr, nullptr);
            }
        }
    };
    EventControlManager event_controller{sleep_time};

    const bool has_bfmt_ex{alIsExtensionPresent("AL_SOFT_bformat_ex") != AL_FALSE};
    ALenum ambi_layout{AL_FUMA_SOFT};
    ALenum ambi_scale{AL_FUMA_SOFT};

    std::unique_ptr<uint8_t[]> samples;
    ALsizei buffer_len{0};

    /* Find a suitable format for OpenAL. */
    mDstChanLayout = 0;
    mFormat = AL_NONE;
    if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP
            || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_DBL
            || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_DBLP
            || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S32
            || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S32P
            || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S64
            || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_S64P)
        && alIsExtensionPresent("AL_EXT_FLOAT32"))
    {
        mDstSampleFmt = AV_SAMPLE_FMT_FLT;
        mFrameSize = 4;
        if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
        {
            if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1)
            {
                mDstChanLayout = mCodecCtx->channel_layout;
                mFrameSize *= 8;
                mFormat = alGetEnumValue("AL_FORMAT_71CHN32");
            }
            if(mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1
                || mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK)
            {
                mDstChanLayout = mCodecCtx->channel_layout;
                mFrameSize *= 6;
                mFormat = alGetEnumValue("AL_FORMAT_51CHN32");
            }
            if(mCodecCtx->channel_layout == AV_CH_LAYOUT_QUAD)
            {
                mDstChanLayout = mCodecCtx->channel_layout;
                mFrameSize *= 4;
                mFormat = alGetEnumValue("AL_FORMAT_QUAD32");
            }
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO_FLOAT32;
        }
        /* Assume 3D B-Format (ambisonics) if the channel layout is blank and
         * there's 4 or more channels. FFmpeg/libavcodec otherwise seems to
         * have no way to specify if the source is actually B-Format (let alone
         * if it's 2D or 3D).
         */
        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4
            && alIsExtensionPresent("AL_EXT_BFORMAT"))
        {
            /* Calculate what should be the ambisonic order from the number of
             * channels, and confirm that's the number of channels. Opus allows
             * an optional non-diegetic stereo stream with the B-Format stream,
             * which we can ignore, so check for that too.
             */
            auto order = static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1;
            int channels{(order+1) * (order+1)};
            if(channels == mCodecCtx->channels || channels+2 == mCodecCtx->channels)
            {
                /* OpenAL only supports first-order with AL_EXT_BFORMAT, which
                 * is 4 channels for 3D buffers.
                 */
                mFrameSize *= 4;
                mFormat = alGetEnumValue("AL_FORMAT_BFORMAT3D_FLOAT32");
            }
        }
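        /* For example: 4 channels gives order = sqrt(4)-1 = 1 and
         * (1+1)^2 = 4, a valid first-order stream; 9 channels gives order 2;
         * and 6 channels (first-order plus the optional non-diegetic stereo
         * pair) matches the channels+2 case. Anything else falls through to
         * the stereo path below.
         */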
        if(!mFormat || mFormat == -1)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = EnableUhj ? AL_FORMAT_UHJ2CHN_FLOAT32_SOFT : AL_FORMAT_STEREO_FLOAT32;
        }
    }
    if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
    {
        mDstSampleFmt = AV_SAMPLE_FMT_U8;
        mFrameSize = 1;
        if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
        {
            if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1)
            {
                mDstChanLayout = mCodecCtx->channel_layout;
                mFrameSize *= 8;
                mFormat = alGetEnumValue("AL_FORMAT_71CHN8");
            }
            if(mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1
                || mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK)
            {
                mDstChanLayout = mCodecCtx->channel_layout;
                mFrameSize *= 6;
                mFormat = alGetEnumValue("AL_FORMAT_51CHN8");
            }
            if(mCodecCtx->channel_layout == AV_CH_LAYOUT_QUAD)
            {
                mDstChanLayout = mCodecCtx->channel_layout;
                mFrameSize *= 4;
                mFormat = alGetEnumValue("AL_FORMAT_QUAD8");
            }
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO8;
        }
        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4
            && alIsExtensionPresent("AL_EXT_BFORMAT"))
        {
            auto order = static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1;
            int channels{(order+1) * (order+1)};
            if(channels == mCodecCtx->channels || channels+2 == mCodecCtx->channels)
            {
                mFrameSize *= 4;
                mFormat = alGetEnumValue("AL_FORMAT_BFORMAT3D_8");
            }
        }
        if(!mFormat || mFormat == -1)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = EnableUhj ? AL_FORMAT_UHJ2CHN8_SOFT : AL_FORMAT_STEREO8;
        }
    }
    if(!mFormat || mFormat == -1)
    {
        mDstSampleFmt = AV_SAMPLE_FMT_S16;
        mFrameSize = 2;
        if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
        {
            if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1)
            {
                mDstChanLayout = mCodecCtx->channel_layout;
                mFrameSize *= 8;
                mFormat = alGetEnumValue("AL_FORMAT_71CHN16");
            }
            if(mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1
                || mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK)
            {
                mDstChanLayout = mCodecCtx->channel_layout;
                mFrameSize *= 6;
                mFormat = alGetEnumValue("AL_FORMAT_51CHN16");
            }
            if(mCodecCtx->channel_layout == AV_CH_LAYOUT_QUAD)
            {
                mDstChanLayout = mCodecCtx->channel_layout;
                mFrameSize *= 4;
                mFormat = alGetEnumValue("AL_FORMAT_QUAD16");
            }
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO16;
        }
        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4
            && alIsExtensionPresent("AL_EXT_BFORMAT"))
        {
            auto order = static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1;
            int channels{(order+1) * (order+1)};
            if(channels == mCodecCtx->channels || channels+2 == mCodecCtx->channels)
            {
                mFrameSize *= 4;
                mFormat = alGetEnumValue("AL_FORMAT_BFORMAT3D_16");
            }
        }
        if(!mFormat || mFormat == -1)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = EnableUhj ? AL_FORMAT_UHJ2CHN16_SOFT : AL_FORMAT_STEREO16;
        }
    }

    mSamples = nullptr;
    mSamplesMax = 0;
    mSamplesPos = 0;
    mSamplesLen = 0;

    mDecodedFrame.reset(av_frame_alloc());
    if(!mDecodedFrame)
    {
        std::cerr<< "Failed to allocate audio frame" <<std::endl;
        return 0;
    }

    if(!mDstChanLayout)
    {
        /* OpenAL only supports first-order ambisonics with AL_EXT_BFORMAT, so
         * we have to drop any extra channels.
         */
        mSwresCtx.reset(swr_alloc_set_opts(nullptr,
            (1_i64<<4)-1, mDstSampleFmt, mCodecCtx->sample_rate,
            (1_i64<<mCodecCtx->channels)-1, mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
            0, nullptr));

        /* Note that ffmpeg/libavcodec has no method to check the ambisonic
         * channel order and normalization, so we can only assume AmbiX as the
         * defacto-standard. This is not true for .amb files, which use FuMa.
         */
        std::vector<double> mtx(64*64, 0.0);
        ambi_layout = AL_ACN_SOFT;
        ambi_scale = AL_SN3D_SOFT;
        if(has_bfmt_ex)
        {
            /* An identity matrix that doesn't remix any channels. */
            std::cout<< "Found AL_SOFT_bformat_ex" <<std::endl;
            mtx[0 + 0*64] = 1.0;
            mtx[1 + 1*64] = 1.0;
            mtx[2 + 2*64] = 1.0;
            mtx[3 + 3*64] = 1.0;
        }
        else
        {
            std::cout<< "Found AL_EXT_BFORMAT" <<std::endl;
            /* Without AL_SOFT_bformat_ex, OpenAL only supports FuMa channel
             * ordering and normalization, so a custom matrix is needed to
             * scale and reorder the source from AmbiX.
             */
            mtx[0 + 0*64] = std::sqrt(0.5);
            mtx[3 + 1*64] = 1.0;
            mtx[1 + 2*64] = 1.0;
            mtx[2 + 3*64] = 1.0;
        }
        swr_set_matrix(mSwresCtx.get(), mtx.data(), 64);
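
        /* The remix matrix maps AmbiX's ACN channel order (W,Y,Z,X) onto
         * FuMa's (W,X,Y,Z): output 1 (FuMa X) takes input 3, output 2 (FuMa
         * Y) takes input 1, and output 3 (FuMa Z) takes input 2, while W is
         * scaled by sqrt(0.5) to go from SN3D's unity W to FuMa's -3dB W.
         */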
    }
    else
        mSwresCtx.reset(swr_alloc_set_opts(nullptr,
            static_cast<int64_t>(mDstChanLayout), mDstSampleFmt, mCodecCtx->sample_rate,
            mCodecCtx->channel_layout ? static_cast<int64_t>(mCodecCtx->channel_layout)
                : av_get_default_channel_layout(mCodecCtx->channels),
            mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
            0, nullptr));
    if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
    {
        std::cerr<< "Failed to initialize audio converter" <<std::endl;
        return 0;
    }

    alGenBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());
    alGenSources(1, &mSource);

    if(DirectOutMode)
        alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, DirectOutMode);
    if(EnableWideStereo)
    {
        const float angles[2]{static_cast<float>(M_PI / 3.0), static_cast<float>(-M_PI / 3.0)};
        alSourcefv(mSource, AL_STEREO_ANGLES, angles);
    }
    if(has_bfmt_ex)
    {
        for(ALuint bufid : mBuffers)
        {
            alBufferi(bufid, AL_AMBISONIC_LAYOUT_SOFT, ambi_layout);
            alBufferi(bufid, AL_AMBISONIC_SCALING_SOFT, ambi_scale);
        }
    }
#ifdef AL_SOFT_UHJ
    if(EnableSuperStereo)
        alSourcei(mSource, AL_STEREO_MODE_SOFT, AL_SUPER_STEREO_SOFT);
#endif

    if(alGetError() != AL_NO_ERROR)
        return 0;

    bool callback_ok{false};
    if(alBufferCallbackSOFT)
    {
        alBufferCallbackSOFT(mBuffers[0], mFormat, mCodecCtx->sample_rate, bufferCallbackC, this);
        alSourcei(mSource, AL_BUFFER, static_cast<ALint>(mBuffers[0]));
        if(alGetError() != AL_NO_ERROR)
        {
            fprintf(stderr, "Failed to set buffer callback\n");
            alSourcei(mSource, AL_BUFFER, 0);
        }
        else
        {
            mBufferDataSize = static_cast<size_t>(duration_cast<seconds>(mCodecCtx->sample_rate *
                AudioBufferTotalTime).count()) * mFrameSize;
            mBufferData = std::make_unique<uint8_t[]>(mBufferDataSize);
            std::fill_n(mBufferData.get(), mBufferDataSize, uint8_t{});

            mReadPos.store(0, std::memory_order_relaxed);
            mWritePos.store(mBufferDataSize/mFrameSize/2*mFrameSize, std::memory_order_relaxed);

            ALCint refresh{};
            alcGetIntegerv(alcGetContextsDevice(alcGetCurrentContext()), ALC_REFRESH, 1, &refresh);
            sleep_time = milliseconds{seconds{1}} / refresh;
            callback_ok = true;
        }
    }
    if(!callback_ok)
        buffer_len = static_cast<int>(duration_cast<seconds>(mCodecCtx->sample_rate *
            AudioBufferTime).count() * mFrameSize);
    if(buffer_len > 0)
        samples = std::make_unique<uint8_t[]>(static_cast<ALuint>(buffer_len));

    /* Prefill the codec buffer. */
    auto packet_sender = [this]()
    {
        while(1)
        {
            const int ret{mQueue.sendPacket(mCodecCtx.get())};
            if(ret == AVErrorEOF) break;
        }
    };
    auto sender = std::async(std::launch::async, packet_sender);
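
    /* The codec is fed from a separate task: sendPacket() blocks while the
     * codec's input is full, so running it via std::async lets this thread
     * keep draining decoded frames while packets stream in. Holding the
     * returned future keeps the sender alive until it hits the flush packet.
     */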
    srclock.lock();
    if(alcGetInteger64vSOFT)
    {
        int64_t devtime{};
        alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()), ALC_DEVICE_CLOCK_SOFT,
            1, &devtime);
        mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
    }

    mSamplesLen = decodeFrame();
    if(mSamplesLen > 0)
    {
        mSamplesPos = std::min(mSamplesLen, getSync());

        auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
        mDeviceStartTime -= skip;
        mCurrentPts += skip;
    }

    while(1)
    {
        if(mMovie.mQuit.load(std::memory_order_relaxed))
        {
            /* If mQuit is set, drain frames until we can't get more audio,
             * indicating we've reached the flush packet and the packet sender
             * will also quit.
             */
            do {
                mSamplesLen = decodeFrame();
                mSamplesPos = mSamplesLen;
            } while(mSamplesLen > 0);
            goto finish;
        }

        ALenum state;
        if(mBufferDataSize > 0)
        {
            alGetSourcei(mSource, AL_SOURCE_STATE, &state);

            /* If mQuit is not set, don't quit even if there's no more audio,
             * so what's buffered has a chance to play to the real end.
             */
            readAudio(getSync());
        }
        else
        {
            ALint processed, queued;

            /* First remove any processed buffers. */
            alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
            while(processed > 0)
            {
                ALuint bid;
                alSourceUnqueueBuffers(mSource, 1, &bid);
                --processed;
            }

            /* Refill the buffer queue. */
            int sync_skip{getSync()};
            alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
            while(static_cast<ALuint>(queued) < mBuffers.size())
            {
                /* Read the next chunk of data, filling the buffer, and queue
                 * it on the source.
                 */
                if(!readAudio(samples.get(), static_cast<ALuint>(buffer_len), sync_skip))
                    break;

                const ALuint bufid{mBuffers[mBufferIdx]};
                mBufferIdx = static_cast<ALuint>((mBufferIdx+1) % mBuffers.size());

                alBufferData(bufid, mFormat, samples.get(), buffer_len, mCodecCtx->sample_rate);
                alSourceQueueBuffers(mSource, 1, &bufid);
                ++queued;
            }

            /* Check that the source is playing. */
            alGetSourcei(mSource, AL_SOURCE_STATE, &state);
            if(state == AL_STOPPED)
            {
                /* AL_STOPPED means there was an underrun. Clear the buffer
                 * queue since this likely means we're late, and rewind the
                 * source to get it back into an AL_INITIAL state.
                 */
                alSourceRewind(mSource);
                alSourcei(mSource, AL_BUFFER, 0);
                if(alcGetInteger64vSOFT)
                {
                    /* Also update the device start time with the current
                     * device clock, so the decoder knows we're running behind.
                     */
                    int64_t devtime{};
                    alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()),
                        ALC_DEVICE_CLOCK_SOFT, 1, &devtime);
                    mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
                }
                continue;
            }
        }

        /* (re)start the source if needed, and wait for a buffer to finish */
        if(state != AL_PLAYING && state != AL_PAUSED)
        {
            if(!startPlayback())
                break;
        }
        if(ALenum err{alGetError()})
            std::cerr<< "Got AL error: 0x"<<std::hex<<err<<std::dec
                << " ("<<alGetString(err)<<")" <<std::endl;

        mSrcCond.wait_for(srclock, sleep_time);
    }
finish:

    alSourceRewind(mSource);
    alSourcei(mSource, AL_BUFFER, 0);
    srclock.unlock();

    return 0;
}


nanoseconds VideoState::getClock()
{
    /* NOTE: This returns incorrect times while not playing. */
    std::lock_guard<std::mutex> _{mDispPtsMutex};
    if(mDisplayPtsTime == microseconds::min())
        return nanoseconds::zero();
    auto delta = get_avtime() - mDisplayPtsTime;
    return mDisplayPts + delta;
}

/* Called by VideoState::updateVideo to display the next video frame. */
void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer, AVFrame *frame)
{
    if(!mImage)
        return;

    double aspect_ratio;
    int win_w, win_h;
    int w, h, x, y;

    int frame_width{frame->width - static_cast<int>(frame->crop_left + frame->crop_right)};
    int frame_height{frame->height - static_cast<int>(frame->crop_top + frame->crop_bottom)};
    if(frame->sample_aspect_ratio.num == 0)
        aspect_ratio = 0.0;
    else
    {
        aspect_ratio = av_q2d(frame->sample_aspect_ratio) * frame_width /
            frame_height;
    }
    if(aspect_ratio <= 0.0)
        aspect_ratio = static_cast<double>(frame_width) / frame_height;

    SDL_GetWindowSize(screen, &win_w, &win_h);
    h = win_h;
    w = (static_cast<int>(std::rint(h * aspect_ratio)) + 3) & ~3;
    if(w > win_w)
    {
        w = win_w;
        h = (static_cast<int>(std::rint(w / aspect_ratio)) + 3) & ~3;
    }
    x = (win_w - w) / 2;
    y = (win_h - h) / 2;

    SDL_Rect src_rect{ static_cast<int>(frame->crop_left), static_cast<int>(frame->crop_top),
        frame_width, frame_height };
    SDL_Rect dst_rect{ x, y, w, h };
    SDL_RenderCopy(renderer, mImage, &src_rect, &dst_rect);
    SDL_RenderPresent(renderer);
}
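/* A sketch of the letterbox math above: (value + 3) & ~3 rounds the computed
 * dimension up to a multiple of 4 (e.g. a 1080-pixel-high window showing
 * 16:9 video gives rint(1080*16/9.0) = 1920, already aligned). Whichever
 * dimension would overflow the window is clamped and the other recomputed,
 * then the x/y offsets center the result.
 */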
/* Called regularly on the main thread where the SDL_Renderer was created. It
 * handles updating the textures of decoded frames and displaying the latest
 * frame.
 */
void VideoState::updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw)
{
    size_t read_idx{mPictQRead.load(std::memory_order_relaxed)};
    Picture *vp{&mPictQ[read_idx]};

    auto clocktime = mMovie.getMasterClock();
    bool updated{false};
    while(1)
    {
        size_t next_idx{(read_idx+1)%mPictQ.size()};
        if(next_idx == mPictQWrite.load(std::memory_order_acquire))
            break;
        Picture *nextvp{&mPictQ[next_idx]};
        if(clocktime < nextvp->mPts && !mMovie.mQuit.load(std::memory_order_relaxed))
        {
            /* For the first update, ensure the first frame gets shown. */
            if(!mFirstUpdate || updated)
                break;
        }

        vp = nextvp;
        updated = true;
        read_idx = next_idx;
    }
    if(mMovie.mQuit.load(std::memory_order_relaxed))
    {
        if(mEOS)
            mFinalUpdate = true;
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();
        return;
    }

    AVFrame *frame{vp->mFrame.get()};
    if(updated)
    {
        mPictQRead.store(read_idx, std::memory_order_release);
        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
        mPictQCond.notify_one();

        /* allocate or resize the buffer! */
        bool fmt_updated{false};
        if(!mImage || mWidth != frame->width || mHeight != frame->height)
        {
            fmt_updated = true;
            if(mImage)
                SDL_DestroyTexture(mImage);
            mImage = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
                frame->width, frame->height);
            if(!mImage)
                std::cerr<< "Failed to create YV12 texture!" <<std::endl;
            mWidth = frame->width;
            mHeight = frame->height;
        }

        int frame_width{frame->width - static_cast<int>(frame->crop_left + frame->crop_right)};
        int frame_height{frame->height - static_cast<int>(frame->crop_top + frame->crop_bottom)};
        if(mFirstUpdate && frame_width > 0 && frame_height > 0)
        {
            /* For the first update, set the window size to the video size. */
            mFirstUpdate = false;

            if(frame->sample_aspect_ratio.den != 0)
            {
                double aspect_ratio = av_q2d(frame->sample_aspect_ratio);
                if(aspect_ratio >= 1.0)
                    frame_width = static_cast<int>(frame_width*aspect_ratio + 0.5);
                else if(aspect_ratio > 0.0)
                    frame_height = static_cast<int>(frame_height/aspect_ratio + 0.5);
            }
            SDL_SetWindowSize(screen, frame_width, frame_height);
        }

        if(mImage)
        {
            void *pixels{nullptr};
            int pitch{0};

            if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
                SDL_UpdateYUVTexture(mImage, nullptr,
                    frame->data[0], frame->linesize[0],
                    frame->data[1], frame->linesize[1],
                    frame->data[2], frame->linesize[2]
                );
            else if(SDL_LockTexture(mImage, nullptr, &pixels, &pitch) != 0)
                std::cerr<< "Failed to lock texture" <<std::endl;
            else
            {
                // Convert the image into YUV format that SDL uses
                int w{frame->width};
                int h{frame->height};
                if(!mSwscaleCtx || fmt_updated)
                {
                    mSwscaleCtx.reset(sws_getContext(
                        w, h, mCodecCtx->pix_fmt,
                        w, h, AV_PIX_FMT_YUV420P, 0,
                        nullptr, nullptr, nullptr
                    ));
                }

                /* point pict at the queue */
                uint8_t *pict_data[3];
                pict_data[0] = static_cast<uint8_t*>(pixels);
                pict_data[1] = pict_data[0] + w*h;
                pict_data[2] = pict_data[1] + w*h/4;

                int pict_linesize[3];
                pict_linesize[0] = pitch;
                pict_linesize[1] = pitch / 2;
                pict_linesize[2] = pitch / 2;
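
                /* A worked layout, assuming the locked IYUV texture is
                 * tightly packed: YUV420P stores a full-resolution w*h Y
                 * plane followed by half-width, half-height U and V planes
                 * of w*h/4 bytes each, which is why the chroma pointers sit
                 * at w*h and w*h + w*h/4 with half the Y pitch per row.
                 */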
                sws_scale(mSwscaleCtx.get(), reinterpret_cast<uint8_t**>(frame->data), frame->linesize,
                    0, h, pict_data, pict_linesize);
                SDL_UnlockTexture(mImage);
            }

            redraw = true;
        }
    }

    if(redraw)
    {
        /* Show the picture! */
        display(screen, renderer, frame);
    }

    if(updated)
    {
        auto disp_time = get_avtime();

        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPts = vp->mPts;
        mDisplayPtsTime = disp_time;
    }
    if(mEOS.load(std::memory_order_acquire))
    {
        if((read_idx+1)%mPictQ.size() == mPictQWrite.load(std::memory_order_acquire))
        {
            mFinalUpdate = true;
            std::unique_lock<std::mutex>{mPictQMutex}.unlock();
            mPictQCond.notify_one();
        }
    }
}

int VideoState::handler()
{
    std::for_each(mPictQ.begin(), mPictQ.end(),
        [](Picture &pict) -> void
        { pict.mFrame = AVFramePtr{av_frame_alloc()}; });

    /* Prefill the codec buffer. */
    auto packet_sender = [this]()
    {
        while(1)
        {
            const int ret{mQueue.sendPacket(mCodecCtx.get())};
            if(ret == AVErrorEOF) break;
        }
    };
    auto sender = std::async(std::launch::async, packet_sender);

    {
        std::lock_guard<std::mutex> _{mDispPtsMutex};
        mDisplayPtsTime = get_avtime();
    }

    auto current_pts = nanoseconds::zero();
    while(1)
    {
        size_t write_idx{mPictQWrite.load(std::memory_order_relaxed)};
        Picture *vp{&mPictQ[write_idx]};

        /* Retrieve video frame. */
        AVFrame *decoded_frame{vp->mFrame.get()};
        while(int ret{mQueue.receiveFrame(mCodecCtx.get(), decoded_frame)})
        {
            if(ret == AVErrorEOF) goto finish;
            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
        }

        /* Get the PTS for this frame. */
        if(decoded_frame->best_effort_timestamp != AVNoPtsValue)
            current_pts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
                static_cast<double>(decoded_frame->best_effort_timestamp)});
        vp->mPts = current_pts;

        /* Update the video clock to the next expected PTS. */
        auto frame_delay = av_q2d(mCodecCtx->time_base);
        frame_delay += decoded_frame->repeat_pict * (frame_delay * 0.5);
        current_pts += duration_cast<nanoseconds>(seconds_d64{frame_delay});

        /* Put the frame in the queue to be loaded into a texture and displayed
         * by the rendering thread.
         */
        write_idx = (write_idx+1)%mPictQ.size();
        mPictQWrite.store(write_idx, std::memory_order_release);

        if(write_idx == mPictQRead.load(std::memory_order_acquire))
        {
            /* Wait until we have space for a new pic */
            std::unique_lock<std::mutex> lock{mPictQMutex};
            while(write_idx == mPictQRead.load(std::memory_order_acquire))
                mPictQCond.wait(lock);
        }
    }
finish:
    mEOS = true;

    std::unique_lock<std::mutex> lock{mPictQMutex};
    while(!mFinalUpdate) mPictQCond.wait(lock);

    return 0;
}

int MovieState::decode_interrupt_cb(void *ctx)
{
    return static_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
}

bool MovieState::prepare()
{
    AVIOContext *avioctx{nullptr};
    AVIOInterruptCB intcb{decode_interrupt_cb, this};
    if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }
    mIOContext.reset(avioctx);

    /* Open movie file. If avformat_open_input fails it will automatically free
     * this context, so don't set it onto a smart pointer yet.
     */
    AVFormatContext *fmtctx{avformat_alloc_context()};
    fmtctx->pb = mIOContext.get();
    fmtctx->interrupt_callback = intcb;
    if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }
    mFormatCtx.reset(fmtctx);

    /* Retrieve stream information */
    if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
    {
        std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
        return false;
    }

    /* Dump information about file onto standard error */
    av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);

    mParseThread = std::thread{std::mem_fn(&MovieState::parse_handler), this};

    std::unique_lock<std::mutex> slock{mStartupMutex};
    while(!mStartupDone) mStartupCond.wait(slock);
    return true;
}
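/* Note the interrupt callback is registered on both the AVIOContext and the
 * AVFormatContext: it's polled during blocking I/O, so a pending quit can
 * abort avformat_open_input or av_read_frame instead of hanging on slow or
 * stalled input.
 */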
  1462. void MovieState::setTitle(SDL_Window *window)
  1463. {
  1464. auto pos1 = mFilename.rfind('/');
  1465. auto pos2 = mFilename.rfind('\\');
  1466. auto fpos = ((pos1 == std::string::npos) ? pos2 :
  1467. (pos2 == std::string::npos) ? pos1 :
  1468. std::max(pos1, pos2)) + 1;
  1469. SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
  1470. }
  1471. nanoseconds MovieState::getClock()
  1472. {
  1473. if(mClockBase == microseconds::min())
  1474. return nanoseconds::zero();
  1475. return get_avtime() - mClockBase;
  1476. }
nanoseconds MovieState::getMasterClock()
{
    if(mAVSyncType == SyncMaster::Video && mVideo.mStream)
        return mVideo.getClock();
    if(mAVSyncType == SyncMaster::Audio && mAudio.mStream)
        return mAudio.getClock();
    return getClock();
}
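
/* AVFormatContext::duration is expressed in AV_TIME_BASE units (microseconds),
 * which the explicit ratio here converts to nanoseconds.
 */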
nanoseconds MovieState::getDuration()
{ return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }

int MovieState::streamComponentOpen(unsigned int stream_index)
{
    if(stream_index >= mFormatCtx->nb_streams)
        return -1;

    /* Get a pointer to the codec context for the stream, and open the
     * associated codec.
     */
    AVCodecCtxPtr avctx{avcodec_alloc_context3(nullptr)};
    if(!avctx) return -1;

    if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
        return -1;

    const AVCodec *codec{avcodec_find_decoder(avctx->codec_id)};
    if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
    {
        std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
            << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
        return -1;
    }

    /* Initialize and start the media type handler */
    switch(avctx->codec_type)
    {
    case AVMEDIA_TYPE_AUDIO:
        mAudio.mStream = mFormatCtx->streams[stream_index];
        mAudio.mCodecCtx = std::move(avctx);
        break;

    case AVMEDIA_TYPE_VIDEO:
        mVideo.mStream = mFormatCtx->streams[stream_index];
        mVideo.mCodecCtx = std::move(avctx);
        break;

    default:
        return -1;
    }

    return static_cast<int>(stream_index);
}
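
/* The parse thread demuxes the file and dispatches each compressed packet to
 * the queue of the stream it belongs to; the audio and video threads decode
 * from their queues independently.
 */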
int MovieState::parse_handler()
{
    auto &audio_queue = mAudio.mQueue;
    auto &video_queue = mVideo.mQueue;

    int video_index{-1};
    int audio_index{-1};

    /* Find the first video and audio streams */
    for(unsigned int i{0u};i < mFormatCtx->nb_streams;i++)
    {
        auto codecpar = mFormatCtx->streams[i]->codecpar;
        if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !DisableVideo && video_index < 0)
            video_index = streamComponentOpen(i);
        else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
            audio_index = streamComponentOpen(i);
    }

    /* Signal prepare() that the streams are open. */
    {
        std::unique_lock<std::mutex> slock{mStartupMutex};
        mStartupDone = true;
    }
    mStartupCond.notify_all();

    if(video_index < 0 && audio_index < 0)
    {
        std::cerr<< mFilename<<": could not open codecs" <<std::endl;
        mQuit = true;
    }

    /* Set the base time 750ms ahead of the current av time. */
    mClockBase = get_avtime() + milliseconds{750};

    if(audio_index >= 0)
        mAudioThread = std::thread{std::mem_fn(&AudioState::handler), &mAudio};
    if(video_index >= 0)
        mVideoThread = std::thread{std::mem_fn(&VideoState::handler), &mVideo};

    /* Main packet reading/dispatching loop */
    AVPacketPtr packet{av_packet_alloc()};
    while(!mQuit.load(std::memory_order_relaxed))
    {
        if(av_read_frame(mFormatCtx.get(), packet.get()) < 0)
            break;

        /* Copy the packet into the queue it's meant for. If that queue is
         * full, keep retrying until there's room or we're told to quit.
         */
        if(packet->stream_index == video_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !video_queue.put(packet.get()))
                std::this_thread::sleep_for(milliseconds{100});
        }
        else if(packet->stream_index == audio_index)
        {
            while(!mQuit.load(std::memory_order_acquire) && !audio_queue.put(packet.get()))
                std::this_thread::sleep_for(milliseconds{100});
        }

        av_packet_unref(packet.get());
    }

    /* Finish the queues so the receivers know nothing more is coming. */
    video_queue.setFinished();
    audio_queue.setFinished();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();

    mVideo.mEOS = true;
    std::unique_lock<std::mutex> lock{mVideo.mPictQMutex};
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);
    lock.unlock();

    SDL_Event evt{};
    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);

    return 0;
}
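
/* Flushing the queues drops any pending packets and wakes threads waiting on
 * them, so everything can notice mQuit and wind down promptly.
 */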
void MovieState::stop()
{
    mQuit = true;
    mAudio.mQueue.flush();
    mVideo.mQueue.flush();
}

// Helper struct and stream operator to print a time with human-readable
// formatting.
struct PrettyTime {
    seconds mTime;
};
std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using hours = std::chrono::hours;
    using minutes = std::chrono::minutes;

    seconds t{rhs.mTime};
    if(t.count() < 0)
    {
        os << '-';
        t *= -1;
    }

    // Only handle up to hour formatting
    if(t >= hours{1})
        os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
            << (duration_cast<minutes>(t).count() % 60) << 'm';
    else
        os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
    os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
        << std::setfill(' ');
    return os;
}
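
/* For example, PrettyTime{seconds{3725}} prints "1h02m05s", and
 * PrettyTime{seconds{125}} prints "2m05s".
 */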

} // namespace

int main(int argc, char *argv[])
{
    std::unique_ptr<MovieState> movState;

    if(argc < 2)
    {
        std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
        return 1;
    }

    /* Register all formats and codecs. ffmpeg 4.0 (libavformat 58.9.100) made
     * registration automatic and deprecated av_register_all.
     */
#if !(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58, 9, 100))
    av_register_all();
#endif

    /* Initialize networking protocols */
    avformat_network_init();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS))
    {
        std::cerr<< "Could not initialize SDL - "<<SDL_GetError() <<std::endl;
        return 1;
    }

    /* Make a window to put our video in */
    SDL_Window *screen{SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE)};
    if(!screen)
    {
        std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
        return 1;
    }

    /* Make a renderer to handle the texture image surface and rendering. */
    Uint32 render_flags{SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC};
    SDL_Renderer *renderer{SDL_CreateRenderer(screen, -1, render_flags)};
    if(renderer)
    {
        SDL_RendererInfo rinf{};
        bool ok{false};

        /* Make sure the renderer supports IYUV textures. If not, fall back to
         * a software renderer.
         */
        if(SDL_GetRendererInfo(renderer, &rinf) == 0)
        {
            for(Uint32 i{0u};!ok && i < rinf.num_texture_formats;i++)
                ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
        }
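
        /* IYUV is planar YUV 4:2:0 (a full-size Y plane followed by
         * quarter-size U and V planes), the same layout as ffmpeg's
         * AV_PIX_FMT_YUV420P, so decoded frames can be uploaded directly.
         */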
        if(!ok)
        {
            std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
            SDL_DestroyRenderer(renderer);
            renderer = nullptr;
        }
    }
    if(!renderer)
    {
        render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
        renderer = SDL_CreateRenderer(screen, -1, render_flags);
    }
    if(!renderer)
    {
        std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
        return 1;
    }
    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderFillRect(renderer, nullptr);
    SDL_RenderPresent(renderer);

    /* Open an audio device */
    ++argv; --argc;
    if(InitAL(&argv, &argc))
    {
        std::cerr<< "Failed to set up audio device" <<std::endl;
        return 1;
    }
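
    /* OpenAL extension entry points aren't exported directly; they're loaded
     * at runtime through al[c]GetProcAddress after confirming the extension
     * is present.
     */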
    {
        auto device = alcGetContextsDevice(alcGetCurrentContext());
        if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
        {
            std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
            alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
                alcGetProcAddress(device, "alcGetInteger64vSOFT")
            );
        }
    }

    if(alIsExtensionPresent("AL_SOFT_source_latency"))
    {
        std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
        alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
            alGetProcAddress("alGetSourcei64vSOFT")
        );
    }
    if(alIsExtensionPresent("AL_SOFT_events"))
    {
        std::cout<< "Found AL_SOFT_events" <<std::endl;
        alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
            alGetProcAddress("alEventControlSOFT"));
        alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
            alGetProcAddress("alEventCallbackSOFT"));
    }
    if(alIsExtensionPresent("AL_SOFT_callback_buffer"))
    {
        std::cout<< "Found AL_SOFT_callback_buffer" <<std::endl;
        alBufferCallbackSOFT = reinterpret_cast<LPALBUFFERCALLBACKSOFT>(
            alGetProcAddress("alBufferCallbackSOFT"));
    }
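
    /* Consume leading option arguments; the first argument that isn't a
     * recognized option begins the list of files to play.
     */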
    int fileidx{0};
    for(;fileidx < argc;++fileidx)
    {
        if(strcmp(argv[fileidx], "-direct") == 0)
        {
            if(alIsExtensionPresent("AL_SOFT_direct_channels_remix"))
            {
                std::cout<< "Found AL_SOFT_direct_channels_remix" <<std::endl;
                DirectOutMode = AL_REMIX_UNMATCHED_SOFT;
            }
            else if(alIsExtensionPresent("AL_SOFT_direct_channels"))
            {
                std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
                DirectOutMode = AL_DROP_UNMATCHED_SOFT;
            }
            else
                std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
        }
        else if(strcmp(argv[fileidx], "-wide") == 0)
        {
            if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
                std::cerr<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl;
            else
            {
                std::cout<< "Found AL_EXT_STEREO_ANGLES" <<std::endl;
                EnableWideStereo = true;
            }
        }
        else if(strcmp(argv[fileidx], "-uhj") == 0)
        {
            if(!alIsExtensionPresent("AL_SOFT_UHJ"))
                std::cerr<< "AL_SOFT_UHJ not supported for UHJ decoding" <<std::endl;
            else
            {
                std::cout<< "Found AL_SOFT_UHJ" <<std::endl;
                EnableUhj = true;
            }
        }
        else if(strcmp(argv[fileidx], "-superstereo") == 0)
        {
            if(!alIsExtensionPresent("AL_SOFT_UHJ"))
                std::cerr<< "AL_SOFT_UHJ not supported for Super Stereo decoding" <<std::endl;
            else
            {
                std::cout<< "Found AL_SOFT_UHJ (Super Stereo)" <<std::endl;
                EnableSuperStereo = true;
            }
        }
        else if(strcmp(argv[fileidx], "-novideo") == 0)
            DisableVideo = true;
        else
            break;
    }

    while(fileidx < argc && !movState)
    {
        movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}};
        if(!movState->prepare()) movState = nullptr;
    }
    if(!movState)
    {
        std::cerr<< "Could not start a video" <<std::endl;
        return 1;
    }

    movState->setTitle(screen);

    /* Default to going to the next movie at the end of one. */
    enum class EomAction {
        Next, Quit
    } eom_action{EomAction::Next};
    seconds last_time{seconds::min()};

    while(1)
    {
        /* SDL_WaitEventTimeout is broken, just force a 10ms sleep. */
        std::this_thread::sleep_for(milliseconds{10});

        auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
        if(cur_time != last_time)
        {
            auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
            std::cout<< " \r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
            last_time = cur_time;
        }

        bool force_redraw{false};
        SDL_Event event{};
        while(SDL_PollEvent(&event) != 0)
        {
            switch(event.type)
            {
            case SDL_KEYDOWN:
                switch(event.key.keysym.sym)
                {
                case SDLK_ESCAPE:
                    movState->stop();
                    eom_action = EomAction::Quit;
                    break;

                case SDLK_n:
                    movState->stop();
                    eom_action = EomAction::Next;
                    break;

                default:
                    break;
                }
                break;
            case SDL_WINDOWEVENT:
                switch(event.window.event)
                {
                case SDL_WINDOWEVENT_RESIZED:
                    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
                    SDL_RenderFillRect(renderer, nullptr);
                    force_redraw = true;
                    break;

                case SDL_WINDOWEVENT_EXPOSED:
                    force_redraw = true;
                    break;

                default:
                    break;
                }
                break;

            case SDL_QUIT:
                movState->stop();
                eom_action = EomAction::Quit;
                break;
            case FF_MOVIE_DONE_EVENT:
                std::cout<<'\n';
                last_time = seconds::min();
                if(eom_action != EomAction::Quit)
                {
                    movState = nullptr;
                    while(fileidx < argc && !movState)
                    {
                        movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}};
                        if(!movState->prepare()) movState = nullptr;
                    }
                    if(movState)
                    {
                        movState->setTitle(screen);
                        break;
                    }
                }

                /* Nothing more to play. Shut everything down and quit. */
                movState = nullptr;

                CloseAL();

                SDL_DestroyRenderer(renderer);
                renderer = nullptr;
                SDL_DestroyWindow(screen);
                screen = nullptr;

                SDL_Quit();
                exit(0);

            default:
                break;
            }
        }

        movState->mVideo.updateVideo(screen, renderer, force_redraw);
    }

    /* Unreachable; the loop above only exits via exit(0). */
    std::cerr<< "SDL event loop failed - "<<SDL_GetError() <<std::endl;
    return 1;
}