/*
 * An example showing how to play a stream sync'd to video, using ffmpeg.
 *
 * Requires C++11.
 */
#include <condition_variable>
#include <functional>
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <cstring>
#include <limits>
#include <memory>
#include <thread>
#include <chrono>
#include <atomic>
#include <vector>
#include <mutex>
#include <deque>
#include <array>
#include <cmath>
#include <string>
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavutil/time.h"
#include "libavutil/pixfmt.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
}

#include "SDL.h"

#include "AL/alc.h"
#include "AL/al.h"
#include "AL/alext.h"

#include "common/alhelpers.h"
extern "C" {
/* Undefine this to disable use of experimental extensions. Don't use for
 * production code! Interfaces and behavior may change prior to being
 * finalized.
 */
#define ALLOW_EXPERIMENTAL_EXTS

#ifdef ALLOW_EXPERIMENTAL_EXTS
#ifndef AL_SOFT_map_buffer
#define AL_SOFT_map_buffer 1
typedef unsigned int ALbitfieldSOFT;
#define AL_MAP_READ_BIT_SOFT        0x00000001
#define AL_MAP_WRITE_BIT_SOFT       0x00000002
#define AL_MAP_PERSISTENT_BIT_SOFT  0x00000004
#define AL_PRESERVE_DATA_BIT_SOFT   0x00000008
typedef void (AL_APIENTRY*LPALBUFFERSTORAGESOFT)(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
typedef void* (AL_APIENTRY*LPALMAPBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length, ALbitfieldSOFT access);
typedef void (AL_APIENTRY*LPALUNMAPBUFFERSOFT)(ALuint buffer);
typedef void (AL_APIENTRY*LPALFLUSHMAPPEDBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length);
#endif

#ifndef AL_SOFT_events
#define AL_SOFT_events 1
#define AL_EVENT_CALLBACK_FUNCTION_SOFT          0x1220
#define AL_EVENT_CALLBACK_USER_PARAM_SOFT        0x1221
#define AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT      0x1222
#define AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT  0x1223
#define AL_EVENT_TYPE_ERROR_SOFT                 0x1224
#define AL_EVENT_TYPE_PERFORMANCE_SOFT           0x1225
#define AL_EVENT_TYPE_DEPRECATED_SOFT            0x1226
#define AL_EVENT_TYPE_DISCONNECTED_SOFT          0x1227
typedef void (AL_APIENTRY*ALEVENTPROCSOFT)(ALenum eventType, ALuint object, ALuint param,
                                           ALsizei length, const ALchar *message,
                                           void *userParam);
typedef void (AL_APIENTRY*LPALEVENTCONTROLSOFT)(ALsizei count, const ALenum *types, ALboolean enable);
typedef void (AL_APIENTRY*LPALEVENTCALLBACKSOFT)(ALEVENTPROCSOFT callback, void *userParam);
typedef void* (AL_APIENTRY*LPALGETPOINTERSOFT)(ALenum pname);
typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values);
#endif
#endif /* ALLOW_EXPERIMENTAL_EXTS */
}
namespace {

inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept { return static_cast<int64_t>(n); }

#ifndef M_PI
#define M_PI (3.14159265358979323846)
#endif

using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1_i64<<32)>>;
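/* Note: fixed32 models OpenAL Soft's 32.32 fixed-point sample offsets (the
 * first value reported by AL_SAMPLE_OFFSET_LATENCY_SOFT): the high 32 bits
 * are whole samples, the low 32 bits are the fraction. For example, a value
 * of (100_i64<<32) + (1_i64<<31) represents 100.5 samples.
 */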
using nanoseconds = std::chrono::nanoseconds;
using microseconds = std::chrono::microseconds;
using milliseconds = std::chrono::milliseconds;
using seconds = std::chrono::seconds;
using seconds_d64 = std::chrono::duration<double>;

const std::string AppName{"alffplay"};

bool EnableDirectOut{false};
bool EnableWideStereo{false};
LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
#ifdef AL_SOFT_map_buffer
LPALBUFFERSTORAGESOFT alBufferStorageSOFT;
LPALMAPBUFFERSOFT alMapBufferSOFT;
LPALUNMAPBUFFERSOFT alUnmapBufferSOFT;
#endif
#ifdef AL_SOFT_events
LPALEVENTCONTROLSOFT alEventControlSOFT;
LPALEVENTCALLBACKSOFT alEventCallbackSOFT;
#endif

const seconds AVNoSyncThreshold{10};

const milliseconds VideoSyncThreshold(10);
#define VIDEO_PICTURE_QUEUE_SIZE 16

const seconds_d64 AudioSyncThreshold{0.03};
const milliseconds AudioSampleCorrectionMax{50};
/* Averaging filter coefficient for audio sync. */
#define AUDIO_DIFF_AVG_NB 20
const double AudioAvgFilterCoeff{std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB)};
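/* The coefficient is chosen so an old clock-difference sample decays to 1%
 * of its weight after AUDIO_DIFF_AVG_NB updates: 0.01^(1/20) ~= 0.794, and
 * 0.794^20 ~= 0.01.
 */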
/* Per-buffer size, in time */
const milliseconds AudioBufferTime{20};
/* Buffer total size, in time (should be divisible by the buffer time) */
const milliseconds AudioBufferTotalTime{800};
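/* With these defaults the source keeps 800ms/20ms = 40 buffers queued. */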
#define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */

enum {
    FF_UPDATE_EVENT = SDL_USEREVENT,
    FF_REFRESH_EVENT,
    FF_MOVIE_DONE_EVENT
};

enum class SyncMaster {
    Audio,
    Video,
    External,

    Default = External
};


inline microseconds get_avtime()
{ return microseconds{av_gettime()}; }
/* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
struct AVIOContextDeleter {
    void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
};
using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;

struct AVFormatCtxDeleter {
    void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
};
using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;

struct AVCodecCtxDeleter {
    void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
};
using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;

struct AVFrameDeleter {
    void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
};
using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;

struct SwrContextDeleter {
    void operator()(SwrContext *ptr) { swr_free(&ptr); }
};
using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;

struct SwsContextDeleter {
    void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
};
using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
class PacketQueue {
    std::deque<AVPacket> mPackets;
    size_t mTotalSize{0};

public:
    ~PacketQueue() { clear(); }

    bool empty() const noexcept { return mPackets.empty(); }
    size_t totalSize() const noexcept { return mTotalSize; }

    void put(const AVPacket *pkt)
    {
        mPackets.push_back(AVPacket{});
        if(av_packet_ref(&mPackets.back(), pkt) != 0)
            mPackets.pop_back();
        else
            mTotalSize += mPackets.back().size;
    }

    AVPacket *front() noexcept
    { return &mPackets.front(); }

    void pop()
    {
        AVPacket *pkt = &mPackets.front();
        mTotalSize -= pkt->size;
        av_packet_unref(pkt);
        mPackets.pop_front();
    }

    void clear()
    {
        for(AVPacket &pkt : mPackets)
            av_packet_unref(&pkt);
        mPackets.clear();
        mTotalSize = 0;
    }
};
struct MovieState;

struct AudioState {
    MovieState &mMovie;

    AVStream *mStream{nullptr};
    AVCodecCtxPtr mCodecCtx;

    std::mutex mQueueMtx;
    std::condition_variable mQueueCond;

    /* Used for clock difference average computation */
    seconds_d64 mClockDiffAvg{0};

    /* Time of the next sample to be buffered */
    nanoseconds mCurrentPts{0};

    /* Device clock time that the stream started at. */
    nanoseconds mDeviceStartTime{nanoseconds::min()};

    /* Decompressed sample frame, and swresample context for conversion */
    AVFramePtr    mDecodedFrame;
    SwrContextPtr mSwresCtx;

    /* Conversion format, for what gets fed to OpenAL */
    int            mDstChanLayout{0};
    AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};

    /* Storage of converted samples */
    uint8_t *mSamples{nullptr};
    int mSamplesLen{0}; /* In samples */
    int mSamplesPos{0};
    int mSamplesMax{0};

    /* OpenAL format */
    ALenum mFormat{AL_NONE};
    ALsizei mFrameSize{0};

    std::mutex mSrcMutex;
    std::condition_variable mSrcCond;
    std::atomic_flag mConnected;
    std::atomic<bool> mPrepared{false};
    ALuint mSource{0};
    std::vector<ALuint> mBuffers;
    ALsizei mBufferIdx{0};

    AudioState(MovieState &movie) : mMovie(movie)
    { mConnected.test_and_set(std::memory_order_relaxed); }
    ~AudioState()
    {
        if(mSource)
            alDeleteSources(1, &mSource);
        if(!mBuffers.empty())
            alDeleteBuffers(mBuffers.size(), mBuffers.data());

        av_freep(&mSamples);
    }

#ifdef AL_SOFT_events
    static void AL_APIENTRY EventCallback(ALenum eventType, ALuint object, ALuint param,
                                          ALsizei length, const ALchar *message,
                                          void *userParam);
#endif

    nanoseconds getClockNoLock();
    nanoseconds getClock()
    {
        std::lock_guard<std::mutex> lock{mSrcMutex};
        return getClockNoLock();
    }

    bool isBufferFilled() const { return mPrepared.load(); }
    void startPlayback();

    int getSync();
    int decodeFrame();
    bool readAudio(uint8_t *samples, int length);

    int handler();
};
struct VideoState {
    MovieState &mMovie;

    AVStream *mStream{nullptr};
    AVCodecCtxPtr mCodecCtx;

    std::mutex mQueueMtx;
    std::condition_variable mQueueCond;

    nanoseconds mClock{0};
    nanoseconds mFrameTimer{0};
    nanoseconds mFrameLastPts{0};
    nanoseconds mFrameLastDelay{0};
    nanoseconds mCurrentPts{0};
    /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
    microseconds mCurrentPtsTime{0};

    /* Decompressed video frame, and swscale context for conversion */
    AVFramePtr    mDecodedFrame;
    SwsContextPtr mSwscaleCtx;

    struct Picture {
        SDL_Texture *mImage{nullptr};
        int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
        std::atomic<bool> mUpdated{false};
        nanoseconds mPts{0};

        ~Picture()
        {
            if(mImage)
                SDL_DestroyTexture(mImage);
            mImage = nullptr;
        }
    };
    std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
    size_t mPictQSize{0}, mPictQRead{0}, mPictQWrite{0};
    std::mutex mPictQMutex;
    std::condition_variable mPictQCond;
    bool mFirstUpdate{true};
    std::atomic<bool> mEOS{false};
    std::atomic<bool> mFinalUpdate{false};

    VideoState(MovieState &movie) : mMovie(movie) { }

    nanoseconds getClock();
    bool isBufferFilled();

    static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
    void schedRefresh(milliseconds delay);
    void display(SDL_Window *screen, SDL_Renderer *renderer);
    void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
    void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
    int queuePicture(nanoseconds pts);
    int handler();
};
struct MovieState {
    AVIOContextPtr mIOContext;
    AVFormatCtxPtr mFormatCtx;

    SyncMaster mAVSyncType{SyncMaster::Default};

    microseconds mClockBase{0};
    std::atomic<bool> mPlaying{false};

    std::mutex mSendMtx;
    std::condition_variable mSendCond;
    /* NOTE: false/clear = need data, true/set = no data needed */
    std::atomic_flag mSendDataGood;

    std::atomic<bool> mQuit{false};

    AudioState mAudio;
    VideoState mVideo;

    std::thread mParseThread;
    std::thread mAudioThread;
    std::thread mVideoThread;

    std::string mFilename;

    MovieState(std::string fname)
      : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
    { }
    ~MovieState()
    {
        mQuit = true;
        if(mParseThread.joinable())
            mParseThread.join();
    }

    static int decode_interrupt_cb(void *ctx);
    bool prepare();
    void setTitle(SDL_Window *window);

    nanoseconds getClock();

    nanoseconds getMasterClock();

    nanoseconds getDuration();

    int streamComponentOpen(int stream_index);
    int parse_handler();
};
nanoseconds AudioState::getClockNoLock()
{
    // The audio clock is the timestamp of the sample currently being heard.
    if(alcGetInteger64vSOFT)
    {
        // If device start time = min, we aren't playing yet.
        if(mDeviceStartTime == nanoseconds::min())
            return nanoseconds::zero();

        // Get the current device clock time and latency.
        auto device = alcGetContextsDevice(alcGetCurrentContext());
        ALCint64SOFT devtimes[2]{0,0};
        alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
        auto latency = nanoseconds{devtimes[1]};
        auto device_time = nanoseconds{devtimes[0]};

        // The clock is simply the current device time relative to the recorded
        // start time. We can also subtract the latency to get a more accurate
        // position of where the audio device actually is in the output stream.
        return std::max(device_time - mDeviceStartTime - latency, nanoseconds::zero());
    }
    /* The source-based clock is based on 4 components:
     * 1 - The timestamp of the next sample to buffer (mCurrentPts)
     * 2 - The length of the source's buffer queue
     *     (AudioBufferTime*AL_BUFFERS_QUEUED)
     * 3 - The offset OpenAL is currently at in the source (the first value
     *     from AL_SAMPLE_OFFSET_LATENCY_SOFT)
     * 4 - The latency between OpenAL and the DAC (the second value from
     *     AL_SAMPLE_OFFSET_LATENCY_SOFT)
     *
     * Subtracting the length of the source queue from the next sample's
     * timestamp gives the timestamp of the sample at the start of the source
     * queue. Adding the source offset to that results in the timestamp for the
     * sample at OpenAL's current position, and subtracting the source latency
     * from that gives the timestamp of the sample currently at the DAC.
     */
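    /* Worked example (illustrative numbers, not from the source): with
     * mCurrentPts = 2000ms, 40 queued 20ms buffers (800ms), a source offset
     * of 441 samples at 44100Hz (10ms), and 30ms of latency, the clock is
     * 2000 - 800 + 10 - 30 = 1180ms into the stream.
     */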
    nanoseconds pts{mCurrentPts};
    if(mSource)
    {
        ALint64SOFT offset[2];

        /* NOTE: The source state must be checked last, in case an underrun
         * occurs and the source stops between retrieving the offset+latency
         * and getting the state. */
        if(alGetSourcei64vSOFT)
            alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
        else
        {
            ALint ioffset;
            alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
            offset[0] = ALint64SOFT{ioffset} << 32;
            offset[1] = 0;
        }
        ALint queued, status;
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        alGetSourcei(mSource, AL_SOURCE_STATE, &status);

        /* If the source is AL_STOPPED, then there was an underrun and all
         * buffers are processed, so ignore the source queue. The audio thread
         * will put the source into an AL_INITIAL state and clear the queue
         * when it starts recovery. */
        if(status != AL_STOPPED)
        {
            pts -= AudioBufferTime*queued;
            pts += std::chrono::duration_cast<nanoseconds>(
                fixed32{offset[0] / mCodecCtx->sample_rate});
        }
        /* Don't offset by the latency if the source isn't playing. */
        if(status == AL_PLAYING)
            pts -= nanoseconds{offset[1]};
    }

    return std::max(pts, nanoseconds::zero());
}
void AudioState::startPlayback()
{
    alSourcePlay(mSource);
    if(alcGetInteger64vSOFT)
    {
        // Subtract the total buffer queue time from the current pts to get the
        // pts of the start of the queue.
        nanoseconds startpts{mCurrentPts - AudioBufferTotalTime};
        int64_t srctimes[2]{0,0};
        alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
        auto device_time = nanoseconds{srctimes[1]};
        auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32{srctimes[0]}) /
                          mCodecCtx->sample_rate;

        // The mixer may have ticked and incremented the device time and sample
        // offset, so subtract the source offset from the device time to get
        // the device time the source started at. Also subtract startpts to get
        // the device time the stream would have started at to reach where it
        // is now.
        mDeviceStartTime = device_time - src_offset - startpts;
    }
}
int AudioState::getSync()
{
    if(mMovie.mAVSyncType == SyncMaster::Audio)
        return 0;

    auto ref_clock = mMovie.getMasterClock();
    auto diff = ref_clock - getClockNoLock();

    if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
    {
        /* Difference is TOO big; reset accumulated average */
        mClockDiffAvg = seconds_d64::zero();
        return 0;
    }

    /* Accumulate the diffs */
    mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
    auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
    if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
        return 0;

    /* Constrain the per-update difference to avoid exceedingly large skips */
    diff = std::min<nanoseconds>(std::max<nanoseconds>(diff, -AudioSampleCorrectionMax),
                                 AudioSampleCorrectionMax);
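    /* The clamped time difference scales to a whole sample count below; e.g.
     * a (hypothetical) 10ms difference at 44100Hz becomes 441 samples to
     * skip or duplicate.
     */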
    return static_cast<int>(std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count());
}
int AudioState::decodeFrame()
{
    while(!mMovie.mQuit.load(std::memory_order_relaxed))
    {
        std::unique_lock<std::mutex> lock(mQueueMtx);
        int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
        if(ret == AVERROR(EAGAIN))
        {
            mMovie.mSendDataGood.clear(std::memory_order_relaxed);
            std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
            mMovie.mSendCond.notify_one();
            do {
                mQueueCond.wait(lock);
                ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
            } while(ret == AVERROR(EAGAIN));
        }
        lock.unlock();
        if(ret == AVERROR_EOF) break;
        mMovie.mSendDataGood.clear(std::memory_order_relaxed);
        mMovie.mSendCond.notify_one();
        if(ret < 0)
        {
            std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
            return 0;
        }

        if(mDecodedFrame->nb_samples <= 0)
        {
            av_frame_unref(mDecodedFrame.get());
            continue;
        }

        /* If provided, update w/ pts */
        if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
            mCurrentPts = std::chrono::duration_cast<nanoseconds>(
                seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
            );

        if(mDecodedFrame->nb_samples > mSamplesMax)
        {
            av_freep(&mSamples);
            av_samples_alloc(
                &mSamples, nullptr, mCodecCtx->channels,
                mDecodedFrame->nb_samples, mDstSampleFmt, 0
            );
            mSamplesMax = mDecodedFrame->nb_samples;
        }
        /* Return the amount of sample frames converted */
        int data_size{swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
            const_cast<const uint8_t**>(mDecodedFrame->data), mDecodedFrame->nb_samples)};

        av_frame_unref(mDecodedFrame.get());
        return data_size;
    }

    return 0;
}
/* Duplicates the sample at in to out, count times. The frame size is a
 * multiple of the template type size.
 */
template<typename T>
static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
{
    const T *sample = reinterpret_cast<const T*>(in);
    T *dst = reinterpret_cast<T*>(out);
    if(frame_size == sizeof(T))
        std::fill_n(dst, count, *sample);
    else
    {
        /* NOTE: frame_size is a multiple of sizeof(T). */
        int type_mult = frame_size / sizeof(T);
        int i = 0;
        std::generate_n(dst, count*type_mult,
            [sample,type_mult,&i]() -> T
            {
                T ret = sample[i];
                i = (i+1)%type_mult;
                return ret;
            }
        );
    }
}
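/* Design note: the caller picks the largest unsigned type that divides the
 * frame size evenly (see readAudio below), so an 8-byte stereo float frame
 * is copied as one uint64_t per frame rather than byte by byte.
 */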
bool AudioState::readAudio(uint8_t *samples, int length)
{
    int sample_skip = getSync();
    int audio_size = 0;

    /* Read the next chunk of data, refill the buffer, and queue it
     * on the source */
    length /= mFrameSize;
    while(audio_size < length)
    {
        if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
        {
            int frame_len = decodeFrame();
            if(frame_len <= 0) break;

            mSamplesLen = frame_len;
            mSamplesPos = std::min(mSamplesLen, sample_skip);
            sample_skip -= mSamplesPos;

            // Adjust the device start time and current pts by the amount we're
            // skipping/duplicating, so that the clock remains correct for the
            // current stream position.
            auto skip = nanoseconds(seconds(mSamplesPos)) / mCodecCtx->sample_rate;
            mDeviceStartTime -= skip;
            mCurrentPts += skip;
            continue;
        }

        int rem = length - audio_size;
        if(mSamplesPos >= 0)
        {
            int len = mSamplesLen - mSamplesPos;
            if(rem > len) rem = len;
            memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
        }
        else
        {
            rem = std::min(rem, -mSamplesPos);

            /* Add samples by copying the first sample */
            if((mFrameSize&7) == 0)
                sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&3) == 0)
                sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
            else if((mFrameSize&1) == 0)
                sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
            else
                sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
        }

        mSamplesPos += rem;
        mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
        samples += rem*mFrameSize;
        audio_size += rem;
    }
    if(audio_size <= 0)
        return false;

    if(audio_size < length)
    {
        int rem = length - audio_size;
        std::fill_n(samples, rem*mFrameSize,
                    (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
        mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
        audio_size += rem;
    }
    return true;
}
#ifdef AL_SOFT_events
void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALuint param,
                                           ALsizei length, const ALchar *message,
                                           void *userParam)
{
    AudioState *self = reinterpret_cast<AudioState*>(userParam);

    if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
    {
        /* Temporarily lock the source mutex to ensure it's not between
         * checking the processed count and going to sleep.
         */
        std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
        self->mSrcCond.notify_one();
        return;
    }

    std::cout<< "\n---- AL Event on AudioState "<<self<<" ----\nEvent: ";
    switch(eventType)
    {
        case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
        case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
        case AL_EVENT_TYPE_ERROR_SOFT: std::cout<< "API error"; break;
        case AL_EVENT_TYPE_PERFORMANCE_SOFT: std::cout<< "Performance"; break;
        case AL_EVENT_TYPE_DEPRECATED_SOFT: std::cout<< "Deprecated"; break;
        case AL_EVENT_TYPE_DISCONNECTED_SOFT: std::cout<< "Disconnected"; break;
        default: std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<
                             std::dec<<std::setw(0)<<std::setfill(' '); break;
    }
    std::cout<< "\n"
        "Object ID: "<<object<<"\n"
        "Parameter: "<<param<<"\n"
        "Message: "<<std::string(message, length)<<"\n----"<<
        std::endl;

    if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
    {
        { std::lock_guard<std::mutex> lock(self->mSrcMutex);
          self->mConnected.clear(std::memory_order_release);
        }
        std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
        self->mSrcCond.notify_one();
    }
}
#endif
int AudioState::handler()
{
    std::unique_lock<std::mutex> srclock(mSrcMutex);
    milliseconds sleep_time = AudioBufferTime / 3;
    ALenum fmt;

#ifdef AL_SOFT_events
    const std::array<ALenum,6> evt_types{{
        AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
        AL_EVENT_TYPE_ERROR_SOFT, AL_EVENT_TYPE_PERFORMANCE_SOFT, AL_EVENT_TYPE_DEPRECATED_SOFT,
        AL_EVENT_TYPE_DISCONNECTED_SOFT
    }};
    if(alEventControlSOFT)
    {
        alEventControlSOFT(evt_types.size(), evt_types.data(), AL_TRUE);
        alEventCallbackSOFT(EventCallback, this);
        sleep_time = AudioBufferTotalTime;
    }
#endif
    /* Find a suitable format for OpenAL. */
    mDstChanLayout = 0;
    mFormat = AL_NONE;
    if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
       alIsExtensionPresent("AL_EXT_FLOAT32"))
    {
        mDstSampleFmt = AV_SAMPLE_FMT_FLT;
        mFrameSize = 4;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 8;
            mFormat = fmt;
        }
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 6;
            mFormat = fmt;
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO_FLOAT32;
        }
        /* Assume 3D B-Format (ambisonics) if the channel layout is blank and
         * there's 4 or more channels. FFmpeg/libavcodec otherwise seems to
         * have no way to specify if the source is actually B-Format (let alone
         * if it's 2D or 3D).
         */
        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
           alIsExtensionPresent("AL_EXT_BFORMAT") &&
           (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D_FLOAT32")) != AL_NONE && fmt != -1)
        {
            int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
            if((order+1)*(order+1) == mCodecCtx->channels ||
               (order+1)*(order+1) + 2 == mCodecCtx->channels)
            {
                /* OpenAL only supports first-order with AL_EXT_BFORMAT, which
                 * is 4 channels for 3D buffers.
                 */
                mFrameSize *= 4;
                mFormat = fmt;
            }
        }
        if(!mFormat)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = AL_FORMAT_STEREO_FLOAT32;
        }
    }
    if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
    {
        mDstSampleFmt = AV_SAMPLE_FMT_U8;
        mFrameSize = 1;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 8;
            mFormat = fmt;
        }
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 6;
            mFormat = fmt;
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO8;
        }
        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
           alIsExtensionPresent("AL_EXT_BFORMAT") &&
           (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D8")) != AL_NONE && fmt != -1)
        {
            int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
            if((order+1)*(order+1) == mCodecCtx->channels ||
               (order+1)*(order+1) + 2 == mCodecCtx->channels)
            {
                mFrameSize *= 4;
                mFormat = fmt;
            }
        }
        if(!mFormat)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = AL_FORMAT_STEREO8;
        }
    }
    if(!mFormat)
    {
        mDstSampleFmt = AV_SAMPLE_FMT_S16;
        mFrameSize = 2;
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 8;
            mFormat = fmt;
        }
        if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
            mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
           (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 6;
            mFormat = fmt;
        }
        if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        {
            mDstChanLayout = mCodecCtx->channel_layout;
            mFrameSize *= 1;
            mFormat = AL_FORMAT_MONO16;
        }
        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
           alIsExtensionPresent("AL_EXT_BFORMAT") &&
           (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D16")) != AL_NONE && fmt != -1)
        {
            int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
            if((order+1)*(order+1) == mCodecCtx->channels ||
               (order+1)*(order+1) + 2 == mCodecCtx->channels)
            {
                mFrameSize *= 4;
                mFormat = fmt;
            }
        }
        if(!mFormat)
        {
            mDstChanLayout = AV_CH_LAYOUT_STEREO;
            mFrameSize *= 2;
            mFormat = AL_FORMAT_STEREO16;
        }
    }
    void *samples = nullptr;
    ALsizei buffer_len = std::chrono::duration_cast<std::chrono::duration<int>>(
        mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize;
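    /* e.g. at 44100Hz, each 20ms buffer holds 44100*0.020 = 882 sample
     * frames, so buffer_len is 882*mFrameSize bytes. */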
    mSamples = nullptr;
    mSamplesMax = 0;
    mSamplesPos = 0;
    mSamplesLen = 0;

    mDecodedFrame.reset(av_frame_alloc());
    if(!mDecodedFrame)
    {
        std::cerr<< "Failed to allocate audio frame" <<std::endl;
        goto finish;
    }

    if(!mDstChanLayout)
    {
        /* OpenAL only supports first-order ambisonics with AL_EXT_BFORMAT, so
         * we have to drop any extra channels. It also only supports FuMa
         * channel ordering and normalization, so a custom matrix is needed to
         * scale and reorder the source from AmbiX.
         */
        mSwresCtx.reset(swr_alloc_set_opts(nullptr,
            (1_i64<<4)-1, mDstSampleFmt, mCodecCtx->sample_rate,
            (1_i64<<mCodecCtx->channels)-1, mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
            0, nullptr));

        /* Note that ffmpeg/libavcodec has no method to check the ambisonic
         * channel order and normalization, so we can only assume AmbiX as the
         * de facto standard. This is not true for .amb files, which use FuMa.
         */
        std::vector<double> mtx(64*64, 0.0);
        mtx[0 + 0*64] = std::sqrt(0.5);
        mtx[3 + 1*64] = 1.0;
        mtx[1 + 2*64] = 1.0;
        mtx[2 + 3*64] = 1.0;
        swr_set_matrix(mSwresCtx.get(), mtx.data(), 64);
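        /* For reference: with swr_set_matrix, mtx[in + out*64] is the weight
         * of input channel 'in' in output channel 'out'. The four entries set
         * above reorder first-order AmbiX (W,Y,Z,X) to FuMa (W,X,Y,Z) and
         * scale W by sqrt(1/2) for FuMa normalization; all other channels
         * drop out as zero rows.
         */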
    }
    else
        mSwresCtx.reset(swr_alloc_set_opts(nullptr,
            mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
            mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
                static_cast<uint64_t>(av_get_default_channel_layout(mCodecCtx->channels)),
            mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
            0, nullptr));
    if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
    {
        std::cerr<< "Failed to initialize audio converter" <<std::endl;
        goto finish;
    }

    mBuffers.assign(AudioBufferTotalTime / AudioBufferTime, 0);
    alGenBuffers(mBuffers.size(), mBuffers.data());
    alGenSources(1, &mSource);

    if(EnableDirectOut)
        alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
    if(EnableWideStereo)
    {
        ALfloat angles[2] = {static_cast<ALfloat>(M_PI / 3.0),
                             static_cast<ALfloat>(-M_PI / 3.0)};
        alSourcefv(mSource, AL_STEREO_ANGLES, angles);
    }

    if(alGetError() != AL_NO_ERROR)
        goto finish;
#ifdef AL_SOFT_map_buffer
    if(alBufferStorageSOFT)
    {
        for(ALuint bufid : mBuffers)
            alBufferStorageSOFT(bufid, mFormat, nullptr, buffer_len, mCodecCtx->sample_rate,
                                AL_MAP_WRITE_BIT_SOFT);
        if(alGetError() != AL_NO_ERROR)
        {
            fprintf(stderr, "Failed to use mapped buffers\n");
            samples = av_malloc(buffer_len);
        }
    }
    else
#endif
        samples = av_malloc(buffer_len);
    while(alGetError() == AL_NO_ERROR && !mMovie.mQuit.load(std::memory_order_relaxed) &&
          mConnected.test_and_set(std::memory_order_relaxed))
    {
        /* First remove any processed buffers. */
        ALint processed;
        alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
        while(processed > 0)
        {
            std::array<ALuint,4> bids;
            alSourceUnqueueBuffers(mSource, std::min<ALsizei>(bids.size(), processed),
                                   bids.data());
            processed -= std::min<ALsizei>(bids.size(), processed);
        }

        /* Refill the buffer queue. */
        ALint queued;
        alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
        while(static_cast<ALuint>(queued) < mBuffers.size())
        {
            ALuint bufid = mBuffers[mBufferIdx];

            uint8_t *ptr = reinterpret_cast<uint8_t*>(samples
#ifdef AL_SOFT_map_buffer
                ? samples : alMapBufferSOFT(bufid, 0, buffer_len, AL_MAP_WRITE_BIT_SOFT)
#endif
            );
            if(!ptr) break;

            /* Read the next chunk of data, filling the buffer, and queue it on
             * the source */
            bool got_audio = readAudio(ptr, buffer_len);
#ifdef AL_SOFT_map_buffer
            if(!samples) alUnmapBufferSOFT(bufid);
#endif
            if(!got_audio) break;

            if(samples)
                alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate);

            alSourceQueueBuffers(mSource, 1, &bufid);
            mBufferIdx = (mBufferIdx+1) % mBuffers.size();
            ++queued;
        }
        if(queued == 0)
            break;

        /* Check that the source is playing. */
        ALint state;
        alGetSourcei(mSource, AL_SOURCE_STATE, &state);
        if(state == AL_STOPPED)
        {
            /* AL_STOPPED means there was an underrun. Clear the buffer queue
             * since this likely means we're late, and rewind the source to get
             * it back into an AL_INITIAL state.
             */
            alSourceRewind(mSource);
            alSourcei(mSource, AL_BUFFER, 0);
            if(alcGetInteger64vSOFT)
            {
                /* Also update the device start time with the current device
                 * clock, so the decoder knows we're running behind.
                 */
                int64_t devtime{};
                alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()),
                                     ALC_DEVICE_CLOCK_SOFT, 1, &devtime);
                mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
            }
            continue;
        }

        /* (re)start the source if needed, and wait for a buffer to finish */
        if(state != AL_PLAYING && state != AL_PAUSED)
        {
            if(mMovie.mPlaying.load(std::memory_order_relaxed))
                startPlayback();
            else
                mPrepared.store(true);
        }

        mSrcCond.wait_for(srclock, sleep_time);
    }

    alSourceRewind(mSource);
    alSourcei(mSource, AL_BUFFER, 0);

finish:
    av_freep(&samples);
    srclock.unlock();

#ifdef AL_SOFT_events
    if(alEventControlSOFT)
    {
        alEventControlSOFT(evt_types.size(), evt_types.data(), AL_FALSE);
        alEventCallbackSOFT(nullptr, nullptr);
    }
#endif

    return 0;
}
nanoseconds VideoState::getClock()
{
    /* NOTE: This returns incorrect times while not playing. */
    auto delta = get_avtime() - mCurrentPtsTime;
    return mCurrentPts + delta;
}

bool VideoState::isBufferFilled()
{
    std::unique_lock<std::mutex> lock(mPictQMutex);
    return mPictQSize >= mPictQ.size();
}

Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
{
    SDL_Event evt{};
    evt.user.type = FF_REFRESH_EVENT;
    evt.user.data1 = opaque;
    SDL_PushEvent(&evt);
    return 0; /* 0 means stop timer */
}

/* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
void VideoState::schedRefresh(milliseconds delay)
{
    SDL_AddTimer(delay.count(), sdl_refresh_timer_cb, this);
}
/* Called by VideoState::refreshTimer to display the next video frame. */
void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
{
    Picture *vp = &mPictQ[mPictQRead];

    if(!vp->mImage)
        return;

    float aspect_ratio;
    int win_w, win_h;
    int w, h, x, y;

    if(mCodecCtx->sample_aspect_ratio.num == 0)
        aspect_ratio = 0.0f;
    else
    {
        aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
                       mCodecCtx->height;
    }
    if(aspect_ratio <= 0.0f)
        aspect_ratio = static_cast<float>(mCodecCtx->width) / static_cast<float>(mCodecCtx->height);

    SDL_GetWindowSize(screen, &win_w, &win_h);
    h = win_h;
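    /* The +3 & ~3 below rounds the computed size up to a multiple of 4, e.g.
     * a 639-pixel-wide result becomes 640. */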
    w = (static_cast<int>(rint(h * aspect_ratio)) + 3) & ~3;
    if(w > win_w)
    {
        w = win_w;
        h = (static_cast<int>(rint(w / aspect_ratio)) + 3) & ~3;
    }
    x = (win_w - w) / 2;
    y = (win_h - h) / 2;

    SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
    SDL_Rect dst_rect{ x, y, w, h };
    SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
    SDL_RenderPresent(renderer);
}
/* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
 * was created. It handles the display of the next decoded video frame (if not
 * falling behind), and sets up the timer for the following video frame.
 */
void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
{
    if(!mStream)
    {
        if(mEOS)
        {
            mFinalUpdate = true;
            std::unique_lock<std::mutex>(mPictQMutex).unlock();
            mPictQCond.notify_all();
            return;
        }
        schedRefresh(milliseconds(100));
        return;
    }
    if(!mMovie.mPlaying.load(std::memory_order_relaxed))
    {
        schedRefresh(milliseconds(1));
        return;
    }

    std::unique_lock<std::mutex> lock(mPictQMutex);
retry:
    if(mPictQSize == 0)
    {
        if(mEOS)
            mFinalUpdate = true;
        else
            schedRefresh(milliseconds(1));
        lock.unlock();
        mPictQCond.notify_all();
        return;
    }

    Picture *vp = &mPictQ[mPictQRead];
    mCurrentPts = vp->mPts;
    mCurrentPtsTime = get_avtime();

    /* Get delay using the frame pts and the pts from last frame. */
    auto delay = vp->mPts - mFrameLastPts;
    if(delay <= seconds::zero() || delay >= seconds(1))
    {
        /* If incorrect delay, use previous one. */
        delay = mFrameLastDelay;
    }
    /* Save for next frame. */
    mFrameLastDelay = delay;
    mFrameLastPts = vp->mPts;
    /* Update delay to sync to clock if not master source. */
    if(mMovie.mAVSyncType != SyncMaster::Video)
    {
        auto ref_clock = mMovie.getMasterClock();
        auto diff = vp->mPts - ref_clock;

        /* Skip or repeat the frame, taking delay into account, but only when
         * the difference is within the no-sync threshold (beyond that the
         * clocks are hopelessly out of sync and delay adjustments won't
         * help). */
        auto sync_threshold = std::min<nanoseconds>(delay, VideoSyncThreshold);
        if(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold)
        {
            if(diff <= -sync_threshold)
                delay = nanoseconds::zero();
            else if(diff >= sync_threshold)
                delay *= 2;
        }
    }
    mFrameTimer += delay;
    /* Compute the REAL delay. */
    auto actual_delay = mFrameTimer - get_avtime();
    if(!(actual_delay >= VideoSyncThreshold))
    {
        /* We don't have time to handle this picture, just skip to the next one. */
        mPictQRead = (mPictQRead+1)%mPictQ.size();
        mPictQSize--;
        goto retry;
    }
    schedRefresh(std::chrono::duration_cast<milliseconds>(actual_delay));

    /* Show the picture! */
    display(screen, renderer);

    /* Update queue for next picture. */
    mPictQRead = (mPictQRead+1)%mPictQ.size();
    mPictQSize--;
    lock.unlock();
    mPictQCond.notify_all();
}
/* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
 * main thread where the renderer was created.
 */
void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
{
    Picture *vp = &mPictQ[mPictQWrite];
    bool fmt_updated = false;

    /* allocate or resize the buffer! */
    if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
    {
        fmt_updated = true;
        if(vp->mImage)
            SDL_DestroyTexture(vp->mImage);
        vp->mImage = SDL_CreateTexture(
            renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
            mCodecCtx->coded_width, mCodecCtx->coded_height
        );
        if(!vp->mImage)
            std::cerr<< "Failed to create YV12 texture!" <<std::endl;
        vp->mWidth = mCodecCtx->width;
        vp->mHeight = mCodecCtx->height;

        if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
        {
            /* For the first update, set the window size to the video size. */
            mFirstUpdate = false;

            int w = vp->mWidth;
            int h = vp->mHeight;
            if(mCodecCtx->sample_aspect_ratio.den != 0)
            {
                double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
                if(aspect_ratio >= 1.0)
                    w = static_cast<int>(w*aspect_ratio + 0.5);
                else if(aspect_ratio > 0.0)
                    h = static_cast<int>(h/aspect_ratio + 0.5);
            }
            SDL_SetWindowSize(screen, w, h);
        }
    }

    if(vp->mImage)
    {
        AVFrame *frame = mDecodedFrame.get();
        void *pixels = nullptr;
        int pitch = 0;

        if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
            SDL_UpdateYUVTexture(vp->mImage, nullptr,
                frame->data[0], frame->linesize[0],
                frame->data[1], frame->linesize[1],
                frame->data[2], frame->linesize[2]
            );
        else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
            std::cerr<< "Failed to lock texture" <<std::endl;
        else
        {
            // Convert the image into YUV format that SDL uses
            int coded_w = mCodecCtx->coded_width;
            int coded_h = mCodecCtx->coded_height;
            int w = mCodecCtx->width;
            int h = mCodecCtx->height;
            if(!mSwscaleCtx || fmt_updated)
            {
                mSwscaleCtx.reset(sws_getContext(
                    w, h, mCodecCtx->pix_fmt,
                    w, h, AV_PIX_FMT_YUV420P, 0,
                    nullptr, nullptr, nullptr
                ));
            }

            /* point pict at the queue */
            uint8_t *pict_data[3];
            pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
            pict_data[1] = pict_data[0] + coded_w*coded_h;
            pict_data[2] = pict_data[1] + coded_w*coded_h/4;

            int pict_linesize[3];
            pict_linesize[0] = pitch;
            pict_linesize[1] = pitch / 2;
            pict_linesize[2] = pitch / 2;
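            /* IYUV is planar 4:2:0: a full-resolution Y plane followed by
             * quarter-resolution U and V planes, so the chroma planes start
             * at coded_w*coded_h and coded_w*coded_h*5/4 bytes in, with half
             * the row pitch. */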
            sws_scale(mSwscaleCtx.get(), reinterpret_cast<uint8_t**>(frame->data),
                      frame->linesize, 0, h, pict_data, pict_linesize);
            SDL_UnlockTexture(vp->mImage);
        }
    }

    vp->mUpdated.store(true, std::memory_order_release);
    std::unique_lock<std::mutex>(mPictQMutex).unlock();
    mPictQCond.notify_one();
}
int VideoState::queuePicture(nanoseconds pts)
{
    /* Wait until we have space for a new pic */
    std::unique_lock<std::mutex> lock(mPictQMutex);
    while(mPictQSize >= mPictQ.size() && !mMovie.mQuit.load(std::memory_order_relaxed))
        mPictQCond.wait(lock);
    lock.unlock();

    if(mMovie.mQuit.load(std::memory_order_relaxed))
        return -1;

    Picture *vp = &mPictQ[mPictQWrite];

    /* We have to create/update the picture in the main thread */
    vp->mUpdated.store(false, std::memory_order_relaxed);
    SDL_Event evt{};
    evt.user.type = FF_UPDATE_EVENT;
    evt.user.data1 = this;
    SDL_PushEvent(&evt);

    /* Wait until the picture is updated. */
    lock.lock();
    while(!vp->mUpdated.load(std::memory_order_relaxed))
    {
        if(mMovie.mQuit.load(std::memory_order_relaxed))
            return -1;
        mPictQCond.wait(lock);
    }
    if(mMovie.mQuit.load(std::memory_order_relaxed))
        return -1;
    vp->mPts = pts;

    mPictQWrite = (mPictQWrite+1)%mPictQ.size();
    mPictQSize++;
    lock.unlock();

    return 0;
}
int VideoState::handler()
{
    mDecodedFrame.reset(av_frame_alloc());
    while(!mMovie.mQuit.load(std::memory_order_relaxed))
    {
        std::unique_lock<std::mutex> lock(mQueueMtx);
        /* Decode video frame */
        int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
        if(ret == AVERROR(EAGAIN))
        {
            mMovie.mSendDataGood.clear(std::memory_order_relaxed);
            std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
            mMovie.mSendCond.notify_one();
            do {
                mQueueCond.wait(lock);
                ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
            } while(ret == AVERROR(EAGAIN));
        }
        lock.unlock();
        if(ret == AVERROR_EOF) break;
        mMovie.mSendDataGood.clear(std::memory_order_relaxed);
        mMovie.mSendCond.notify_one();
        if(ret < 0)
        {
            std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
            continue;
        }

        /* Get the PTS for this frame. */
        nanoseconds pts;
        if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
            mClock = std::chrono::duration_cast<nanoseconds>(
                seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
            );
        pts = mClock;

        /* Update the video clock to the next expected PTS. */
        auto frame_delay = av_q2d(mCodecCtx->time_base);
        frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
        mClock += std::chrono::duration_cast<nanoseconds>(seconds_d64(frame_delay));

        if(queuePicture(pts) < 0)
            break;
        av_frame_unref(mDecodedFrame.get());
    }
    mEOS = true;

    std::unique_lock<std::mutex> lock(mPictQMutex);
    if(mMovie.mQuit.load(std::memory_order_relaxed))
    {
        mPictQRead = 0;
        mPictQWrite = 0;
        mPictQSize = 0;
    }
    while(!mFinalUpdate)
        mPictQCond.wait(lock);

    return 0;
}
int MovieState::decode_interrupt_cb(void *ctx)
{
    return reinterpret_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
}

bool MovieState::prepare()
{
    AVIOContext *avioctx = nullptr;
    AVIOInterruptCB intcb = { decode_interrupt_cb, this };
    if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }
    mIOContext.reset(avioctx);

    /* Open movie file. If avformat_open_input fails it will automatically
     * free this context, so don't set it onto a smart pointer yet.
     */
    AVFormatContext *fmtctx = avformat_alloc_context();
    fmtctx->pb = mIOContext.get();
    fmtctx->interrupt_callback = intcb;
    if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
    {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
        return false;
    }
    mFormatCtx.reset(fmtctx);

    /* Retrieve stream information */
    if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
    {
        std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
        return false;
    }

    mVideo.schedRefresh(milliseconds(40));

    mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
    return true;
}
void MovieState::setTitle(SDL_Window *window)
{
    auto pos1 = mFilename.rfind('/');
    auto pos2 = mFilename.rfind('\\');
    auto fpos = ((pos1 == std::string::npos) ? pos2 :
                 (pos2 == std::string::npos) ? pos1 :
                 std::max(pos1, pos2)) + 1;
    SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
}

nanoseconds MovieState::getClock()
{
    if(!mPlaying.load(std::memory_order_relaxed))
        return nanoseconds::zero();
    return get_avtime() - mClockBase;
}

nanoseconds MovieState::getMasterClock()
{
    if(mAVSyncType == SyncMaster::Video)
        return mVideo.getClock();
    if(mAVSyncType == SyncMaster::Audio)
        return mAudio.getClock();
    return getClock();
}

nanoseconds MovieState::getDuration()
{ return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
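/* AV_TIME_BASE is 1000000, so mFormatCtx->duration counts microseconds and
 * the implicit conversion to nanoseconds here is lossless. */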
int MovieState::streamComponentOpen(int stream_index)
{
    if(stream_index < 0 || static_cast<unsigned int>(stream_index) >= mFormatCtx->nb_streams)
        return -1;

    /* Get a pointer to the codec context for the stream, and open the
     * associated codec.
     */
    AVCodecCtxPtr avctx(avcodec_alloc_context3(nullptr));
    if(!avctx) return -1;

    if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
        return -1;

    AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
    if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
    {
        std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
                 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
        return -1;
    }

    /* Initialize and start the media type handler */
    switch(avctx->codec_type)
    {
        case AVMEDIA_TYPE_AUDIO:
            mAudio.mStream = mFormatCtx->streams[stream_index];
            mAudio.mCodecCtx = std::move(avctx);

            mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
            break;

        case AVMEDIA_TYPE_VIDEO:
            mVideo.mStream = mFormatCtx->streams[stream_index];
            mVideo.mCodecCtx = std::move(avctx);

            mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
            break;

        default:
            return -1;
    }

    return stream_index;
}
int MovieState::parse_handler()
{
    int video_index = -1;
    int audio_index = -1;

    /* Dump information about file onto standard error */
    av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);

    /* Find the first video and audio streams */
    for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
    {
        auto codecpar = mFormatCtx->streams[i]->codecpar;
        if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
            video_index = streamComponentOpen(i);
        else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
            audio_index = streamComponentOpen(i);
    }

    if(video_index < 0 && audio_index < 0)
    {
        std::cerr<< mFilename<<": could not open codecs" <<std::endl;
        mQuit = true;
    }

    PacketQueue audio_queue, video_queue;
    bool input_finished = false;

    /* Main packet reading/dispatching loop */
    while(!mQuit.load(std::memory_order_relaxed) && !input_finished)
    {
        AVPacket packet;
        if(av_read_frame(mFormatCtx.get(), &packet) < 0)
            input_finished = true;
        else
        {
            /* Copy the packet into the queue it's meant for. */
            if(packet.stream_index == video_index)
                video_queue.put(&packet);
            else if(packet.stream_index == audio_index)
                audio_queue.put(&packet);
            av_packet_unref(&packet);
        }

        do {
            /* Send whatever queued packets we have. */
            if(!audio_queue.empty())
            {
                std::unique_lock<std::mutex> lock(mAudio.mQueueMtx);
                int ret;
                do {
                    ret = avcodec_send_packet(mAudio.mCodecCtx.get(), audio_queue.front());
                    if(ret != AVERROR(EAGAIN)) audio_queue.pop();
                } while(ret != AVERROR(EAGAIN) && !audio_queue.empty());
                lock.unlock();
                mAudio.mQueueCond.notify_one();
            }
            if(!video_queue.empty())
            {
                std::unique_lock<std::mutex> lock(mVideo.mQueueMtx);
                int ret;
                do {
                    ret = avcodec_send_packet(mVideo.mCodecCtx.get(), video_queue.front());
                    if(ret != AVERROR(EAGAIN)) video_queue.pop();
                } while(ret != AVERROR(EAGAIN) && !video_queue.empty());
                lock.unlock();
                mVideo.mQueueCond.notify_one();
            }
            /* If the queues are completely empty, or it's not full and there's
             * more input to read, go get more.
             */
            size_t queue_size = audio_queue.totalSize() + video_queue.totalSize();
            if(queue_size == 0 || (queue_size < MAX_QUEUE_SIZE && !input_finished))
                break;

            if(!mPlaying.load(std::memory_order_relaxed))
            {
                if((!mAudio.mCodecCtx || mAudio.isBufferFilled()) &&
                   (!mVideo.mCodecCtx || mVideo.isBufferFilled()))
                {
                    /* Set the base time 50ms ahead of the current av time. */
                    mClockBase = get_avtime() + milliseconds(50);
                    mVideo.mCurrentPtsTime = mClockBase;
                    mVideo.mFrameTimer = mVideo.mCurrentPtsTime;
                    mAudio.startPlayback();
                    mPlaying.store(true, std::memory_order_release);
                }
            }
            /* Nothing to send or get for now, wait a bit and try again. */
            { std::unique_lock<std::mutex> lock(mSendMtx);
              if(mSendDataGood.test_and_set(std::memory_order_relaxed))
                  mSendCond.wait_for(lock, milliseconds(10));
            }
        } while(!mQuit.load(std::memory_order_relaxed));
    }
    /* Pass a null packet to finish the send buffers (the receive functions
     * will get AVERROR_EOF when emptied).
     */
    if(mVideo.mCodecCtx)
    {
        { std::lock_guard<std::mutex> lock(mVideo.mQueueMtx);
          avcodec_send_packet(mVideo.mCodecCtx.get(), nullptr);
        }
        mVideo.mQueueCond.notify_one();
    }
    if(mAudio.mCodecCtx)
    {
        { std::lock_guard<std::mutex> lock(mAudio.mQueueMtx);
          avcodec_send_packet(mAudio.mCodecCtx.get(), nullptr);
        }
        mAudio.mQueueCond.notify_one();
    }
    video_queue.clear();
    audio_queue.clear();

    /* all done - wait for it */
    if(mVideoThread.joinable())
        mVideoThread.join();
    if(mAudioThread.joinable())
        mAudioThread.join();

    mVideo.mEOS = true;
    std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
    while(!mVideo.mFinalUpdate)
        mVideo.mPictQCond.wait(lock);
    lock.unlock();

    SDL_Event evt{};
    evt.user.type = FF_MOVIE_DONE_EVENT;
    SDL_PushEvent(&evt);

    return 0;
}

// Helper class+method to print the time with human-readable formatting.
struct PrettyTime {
    seconds mTime;
};
inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
{
    using hours = std::chrono::hours;
    using minutes = std::chrono::minutes;
    using std::chrono::duration_cast;

    seconds t = rhs.mTime;
    if(t.count() < 0)
    {
        os << '-';
        t *= -1;
    }

    // Only handle up to hour formatting
    if(t >= hours(1))
        os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
           << (duration_cast<minutes>(t).count() % 60) << 'm';
    else
        os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
    os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
       << std::setfill(' ');
    return os;
}
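// For example, PrettyTime{seconds(3725)} prints "1h02m05s", and
// PrettyTime{seconds(-42)} prints "-0m42s".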

} // namespace

int main(int argc, char *argv[])
{
    std::unique_ptr<MovieState> movState;

    if(argc < 2)
    {
        std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] [-wide] <files...>" <<std::endl;
        return 1;
    }
    /* Register all formats and codecs */
    av_register_all();
    /* Initialize networking protocols */
    avformat_network_init();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
    {
        std::cerr<< "Could not initialize SDL - "<<SDL_GetError() <<std::endl;
        return 1;
    }

    /* Make a window to put our video */
    SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
    if(!screen)
    {
        std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
        return 1;
    }

    /* Make a renderer to handle the texture image surface and rendering. */
    Uint32 render_flags = SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC;
    SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, render_flags);
    if(renderer)
    {
        SDL_RendererInfo rinf{};
        bool ok = false;

        /* Make sure the renderer supports IYUV textures. If not, fallback to a
         * software renderer. */
        if(SDL_GetRendererInfo(renderer, &rinf) == 0)
        {
            for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
                ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
        }
        if(!ok)
        {
            std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
            SDL_DestroyRenderer(renderer);
            renderer = nullptr;
        }
    }
    if(!renderer)
    {
        render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
        renderer = SDL_CreateRenderer(screen, -1, render_flags);
    }
    if(!renderer)
    {
        std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
        return 1;
    }
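
    /* Clear the window to black and present it, so something sane is shown
     * until the first video frame arrives.
     */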
    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderFillRect(renderer, nullptr);
    SDL_RenderPresent(renderer);

    /* Open an audio device */
    ++argv; --argc;
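    /* InitAL() (from common/alhelpers) opens the playback device, consuming a
     * leading -device <name> argument pair from argv if one was given.
     */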
    if(InitAL(&argv, &argc))
    {
        std::cerr<< "Failed to set up audio device" <<std::endl;
        return 1;
    }
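
    /* ALC_SOFT_device_clock exposes the device's playback clock via
     * alcGetInteger64vSOFT, giving a stable time base to sync against.
     */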
    { auto device = alcGetContextsDevice(alcGetCurrentContext());
        if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
        {
            std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
            alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
                alcGetProcAddress(device, "alcGetInteger64vSOFT")
            );
        }
    }
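
    /* AL_SOFT_source_latency adds alGetSourcei64vSOFT, which can report a
     * source's sample offset and the device latency in a single query.
     */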
    if(alIsExtensionPresent("AL_SOFT_source_latency"))
    {
        std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
        alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
            alGetProcAddress("alGetSourcei64vSOFT")
        );
    }
#ifdef AL_SOFT_map_buffer
    if(alIsExtensionPresent("AL_SOFTX_map_buffer"))
    {
        std::cout<< "Found AL_SOFT_map_buffer" <<std::endl;
        alBufferStorageSOFT = reinterpret_cast<LPALBUFFERSTORAGESOFT>(
            alGetProcAddress("alBufferStorageSOFT"));
        alMapBufferSOFT = reinterpret_cast<LPALMAPBUFFERSOFT>(
            alGetProcAddress("alMapBufferSOFT"));
        alUnmapBufferSOFT = reinterpret_cast<LPALUNMAPBUFFERSOFT>(
            alGetProcAddress("alUnmapBufferSOFT"));
    }
#endif
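
    /* The "AL_SOFTX_" prefix in these extension queries marks the
     * experimental form of an extension; it becomes "AL_SOFT_" once the
     * interface is finalized.
     */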
#ifdef AL_SOFT_events
    if(alIsExtensionPresent("AL_SOFTX_events"))
    {
        std::cout<< "Found AL_SOFT_events" <<std::endl;
        alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
            alGetProcAddress("alEventControlSOFT"));
        alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
            alGetProcAddress("alEventCallbackSOFT"));
    }
#endif

    int fileidx = 0;
    for(;fileidx < argc;++fileidx)
    {
        if(strcmp(argv[fileidx], "-direct") == 0)
        {
            if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
                std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
            else
            {
                std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
                EnableDirectOut = true;
            }
        }
        else if(strcmp(argv[fileidx], "-wide") == 0)
        {
            if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
                std::cerr<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl;
            else
            {
                std::cout<< "Found AL_EXT_STEREO_ANGLES" <<std::endl;
                EnableWideStereo = true;
            }
        }
        else
            break;
    }
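
    /* Anything after the recognized flags is a list of files to play; try
     * each in turn until one opens and prepares successfully.
     */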
    while(fileidx < argc && !movState)
    {
        movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
        if(!movState->prepare()) movState = nullptr;
    }
    if(!movState)
    {
        std::cerr<< "Could not start a video" <<std::endl;
        return 1;
    }

    movState->setTitle(screen);

    /* Default to going to the next movie at the end of one. */
    enum class EomAction {
        Next, Quit
    } eom_action = EomAction::Next;
    seconds last_time(-1);
    SDL_Event event;

    while(1)
    {
        int have_evt = SDL_WaitEventTimeout(&event, 10);
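
        /* Waiting with a 10ms timeout instead of blocking indefinitely keeps
         * the position readout below ticking even when no events arrive.
         */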
        auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
        if(cur_time != last_time)
        {
            auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
            std::cout<< "\r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
            last_time = cur_time;
        }
        if(!have_evt) continue;

        switch(event.type)
        {
            case SDL_KEYDOWN:
                switch(event.key.keysym.sym)
                {
                    case SDLK_ESCAPE:
                        movState->mQuit = true;
                        eom_action = EomAction::Quit;
                        break;

                    case SDLK_n:
                        movState->mQuit = true;
                        eom_action = EomAction::Next;
                        break;

                    default:
                        break;
                }
                break;

            case SDL_WINDOWEVENT:
                switch(event.window.event)
                {
                    case SDL_WINDOWEVENT_RESIZED:
                        SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
                        SDL_RenderFillRect(renderer, nullptr);
                        break;

                    default:
                        break;
                }
                break;

            case SDL_QUIT:
                movState->mQuit = true;
                eom_action = EomAction::Quit;
                break;
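
            /* FF_UPDATE_EVENT and FF_REFRESH_EVENT are posted from the video
             * thread; the SDL rendering they trigger needs to happen here on
             * the main thread.
             */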
            case FF_UPDATE_EVENT:
                reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
                    screen, renderer
                );
                break;

            case FF_REFRESH_EVENT:
                reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
                    screen, renderer
                );
                break;

            case FF_MOVIE_DONE_EVENT:
                std::cout<<'\n';
                last_time = seconds(-1);
                if(eom_action != EomAction::Quit)
                {
                    movState = nullptr;
                    while(fileidx < argc && !movState)
                    {
                        movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
                        if(!movState->prepare()) movState = nullptr;
                    }
                    if(movState)
                    {
                        movState->setTitle(screen);
                        break;
                    }
                }

                /* Nothing more to play. Shut everything down and quit. */
                movState = nullptr;

                CloseAL();

                SDL_DestroyRenderer(renderer);
                renderer = nullptr;
                SDL_DestroyWindow(screen);
                screen = nullptr;

                SDL_Quit();
                exit(0);

            default:
                break;
        }
    }

    std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
    return 1;
}