#include "config.h"

#include "voice.h"

#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <new>
#include <stdlib.h>
#include <utility>
#include <vector>

#include "albyte.h"
#include "alnumeric.h"
#include "aloptional.h"
#include "alspan.h"
#include "alstring.h"
#include "ambidefs.h"
#include "async_event.h"
#include "buffer_storage.h"
#include "context.h"
#include "cpu_caps.h"
#include "devformat.h"
#include "device.h"
#include "filters/biquad.h"
#include "filters/nfc.h"
#include "filters/splitter.h"
#include "fmt_traits.h"
#include "logging.h"
#include "mixer.h"
#include "mixer/defs.h"
#include "mixer/hrtfdefs.h"
#include "opthelpers.h"
#include "resampler_limits.h"
#include "ringbuffer.h"
#include "vector.h"
#include "voice_change.h"

struct CTag;
#ifdef HAVE_SSE
struct SSETag;
#endif
#ifdef HAVE_NEON
struct NEONTag;
#endif
struct CopyTag;

static_assert(!(sizeof(DeviceBase::MixerBufferLine)&15),
    "DeviceBase::MixerBufferLine must be a multiple of 16 bytes");
static_assert(!(MaxResamplerEdge&3), "MaxResamplerEdge is not a multiple of 4");

Resampler ResamplerDefault{Resampler::Linear};
namespace {

using uint = unsigned int;

using HrtfMixerFunc = void(*)(const float *InSamples, float2 *AccumSamples, const uint IrSize,
    const MixHrtfFilter *hrtfparams, const size_t BufferSize);
using HrtfMixerBlendFunc = void(*)(const float *InSamples, float2 *AccumSamples,
    const uint IrSize, const HrtfFilter *oldparams, const MixHrtfFilter *newparams,
    const size_t BufferSize);

HrtfMixerFunc MixHrtfSamples{MixHrtf_<CTag>};
HrtfMixerBlendFunc MixHrtfBlendSamples{MixHrtfBlend_<CTag>};

inline MixerFunc SelectMixer()
{
#ifdef HAVE_NEON
    if((CPUCapFlags&CPU_CAP_NEON))
        return Mix_<NEONTag>;
#endif
#ifdef HAVE_SSE
    if((CPUCapFlags&CPU_CAP_SSE))
        return Mix_<SSETag>;
#endif
    return Mix_<CTag>;
}

inline HrtfMixerFunc SelectHrtfMixer()
{
#ifdef HAVE_NEON
    if((CPUCapFlags&CPU_CAP_NEON))
        return MixHrtf_<NEONTag>;
#endif
#ifdef HAVE_SSE
    if((CPUCapFlags&CPU_CAP_SSE))
        return MixHrtf_<SSETag>;
#endif
    return MixHrtf_<CTag>;
}

inline HrtfMixerBlendFunc SelectHrtfBlendMixer()
{
#ifdef HAVE_NEON
    if((CPUCapFlags&CPU_CAP_NEON))
        return MixHrtfBlend_<NEONTag>;
#endif
#ifdef HAVE_SSE
    if((CPUCapFlags&CPU_CAP_SSE))
        return MixHrtfBlend_<SSETag>;
#endif
    return MixHrtfBlend_<CTag>;
}

} // namespace
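/* The Select*Mixer() helpers above perform a one-time runtime CPU dispatch:
 * each returns the widest SIMD variant (NEON, then SSE) permitted by the
 * detected CPUCapFlags, falling back to the plain C (CTag) implementation.
 * Voice::InitMixer() below caches the results in function pointers so the
 * per-sample mixing loops never re-check CPU capabilities.
 */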
void Voice::InitMixer(al::optional<std::string> resampler)
{
    if(resampler)
    {
        struct ResamplerEntry {
            const char name[16];
            const Resampler resampler;
        };
        constexpr ResamplerEntry ResamplerList[]{
            { "none", Resampler::Point },
            { "point", Resampler::Point },
            { "linear", Resampler::Linear },
            { "cubic", Resampler::Cubic },
            { "bsinc12", Resampler::BSinc12 },
            { "fast_bsinc12", Resampler::FastBSinc12 },
            { "bsinc24", Resampler::BSinc24 },
            { "fast_bsinc24", Resampler::FastBSinc24 },
        };

        const char *str{resampler->c_str()};
        if(al::strcasecmp(str, "bsinc") == 0)
        {
            WARN("Resampler option \"%s\" is deprecated, using bsinc12\n", str);
            str = "bsinc12";
        }
        else if(al::strcasecmp(str, "sinc4") == 0 || al::strcasecmp(str, "sinc8") == 0)
        {
            WARN("Resampler option \"%s\" is deprecated, using cubic\n", str);
            str = "cubic";
        }

        auto iter = std::find_if(std::begin(ResamplerList), std::end(ResamplerList),
            [str](const ResamplerEntry &entry) -> bool
            { return al::strcasecmp(str, entry.name) == 0; });
        if(iter == std::end(ResamplerList))
            ERR("Invalid resampler: %s\n", str);
        else
            ResamplerDefault = iter->resampler;
    }

    MixSamples = SelectMixer();
    MixHrtfBlendSamples = SelectHrtfBlendMixer();
    MixHrtfSamples = SelectHrtfMixer();
}
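/* For example, a "resampler" config value of "fast_bsinc24" selects
 * Resampler::FastBSinc24 as the default; the deprecated names "bsinc" and
 * "sinc4"/"sinc8" map to "bsinc12" and "cubic" with a warning; an
 * unrecognized name is logged and leaves ResamplerDefault unchanged.
 */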
namespace {

void SendSourceStoppedEvent(ContextBase *context, uint id)
{
    RingBuffer *ring{context->mAsyncEvents.get()};
    auto evt_vec = ring->getWriteVector();
    if(evt_vec.first.len < 1) return;

    AsyncEvent *evt{al::construct_at(reinterpret_cast<AsyncEvent*>(evt_vec.first.buf),
        AsyncEvent::SourceStateChange)};
    evt->u.srcstate.id = id;
    evt->u.srcstate.state = AsyncEvent::SrcState::Stop;

    ring->writeAdvance(1);
}


const float *DoFilters(BiquadFilter &lpfilter, BiquadFilter &hpfilter, float *dst,
    const al::span<const float> src, int type)
{
    switch(type)
    {
    case AF_None:
        lpfilter.clear();
        hpfilter.clear();
        break;

    case AF_LowPass:
        lpfilter.process(src, dst);
        hpfilter.clear();
        return dst;
    case AF_HighPass:
        lpfilter.clear();
        hpfilter.process(src, dst);
        return dst;

    case AF_BandPass:
        DualBiquad{lpfilter, hpfilter}.process(src, dst);
        return dst;
    }
    return src.data();
}
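/* Note that for AF_None (or an unknown filter type) DoFilters returns the
 * unfiltered source pointer rather than the dst scratch buffer, while still
 * clearing the unused filters so they don't carry stale state into a later
 * mix that does apply them.
 */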
template<FmtType Type>
inline void LoadSamples(const al::span<float*> dstSamples, const size_t dstOffset,
    const al::byte *src, const size_t srcOffset, const FmtChannels srcChans, const size_t srcStep,
    const size_t samples) noexcept
{
    constexpr size_t sampleSize{sizeof(typename al::FmtTypeTraits<Type>::Type)};
    auto s = src + srcOffset*srcStep*sampleSize;

    if(srcChans == FmtUHJ2 || srcChans == FmtSuperStereo)
    {
        al::LoadSampleArray<Type>(dstSamples[0]+dstOffset, s, srcStep, samples);
        al::LoadSampleArray<Type>(dstSamples[1]+dstOffset, s+sampleSize, srcStep, samples);
        std::fill_n(dstSamples[2]+dstOffset, samples, 0.0f);
    }
    else
    {
        for(auto *dst : dstSamples)
        {
            al::LoadSampleArray<Type>(dst+dstOffset, s, srcStep, samples);
            s += sampleSize;
        }
    }
}

void LoadSamples(const al::span<float*> dstSamples, const size_t dstOffset, const al::byte *src,
    const size_t srcOffset, const FmtType srcType, const FmtChannels srcChans,
    const size_t srcStep, const size_t samples) noexcept
{
#define HANDLE_FMT(T) case T:                                                 \
    LoadSamples<T>(dstSamples, dstOffset, src, srcOffset, srcChans, srcStep,  \
        samples);                                                             \
    break

    switch(srcType)
    {
    HANDLE_FMT(FmtUByte);
    HANDLE_FMT(FmtShort);
    HANDLE_FMT(FmtFloat);
    HANDLE_FMT(FmtDouble);
    HANDLE_FMT(FmtMulaw);
    HANDLE_FMT(FmtAlaw);
    }
#undef HANDLE_FMT
}
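/* The non-template overload above dispatches on the runtime sample type to
 * the templated loader, which de-interleaves srcStep-strided sample frames
 * into per-channel float buffers. For 2-channel UHJ and Super Stereo input
 * only the two encoded channels exist in the source data, so the third
 * channel is zero-filled here and later filled in by the voice's UHJ decoder
 * (set up in Voice::prepare).
 */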
void LoadBufferStatic(VoiceBufferItem *buffer, VoiceBufferItem *bufferLoopItem,
    const size_t dataPosInt, const FmtType sampleType, const FmtChannels sampleChannels,
    const size_t srcStep, const size_t samplesToLoad, const al::span<float*> voiceSamples)
{
    const uint loopStart{buffer->mLoopStart};
    const uint loopEnd{buffer->mLoopEnd};
    ASSUME(loopEnd > loopStart);

    /* If current pos is beyond the loop range, do not loop */
    if(!bufferLoopItem || dataPosInt >= loopEnd)
    {
        /* Load what's left to play from the buffer */
        const size_t remaining{minz(samplesToLoad, buffer->mSampleLen-dataPosInt)};
        LoadSamples(voiceSamples, 0, buffer->mSamples, dataPosInt, sampleType, sampleChannels,
            srcStep, remaining);

        if(const size_t toFill{samplesToLoad - remaining})
        {
            for(auto *chanbuffer : voiceSamples)
            {
                auto srcsamples = chanbuffer + remaining - 1;
                std::fill_n(srcsamples + 1, toFill, *srcsamples);
            }
        }
    }
    else
    {
        /* Load what's left of this loop iteration */
        const size_t remaining{minz(samplesToLoad, loopEnd-dataPosInt)};
        LoadSamples(voiceSamples, 0, buffer->mSamples, dataPosInt, sampleType, sampleChannels,
            srcStep, remaining);

        /* Load repeats of the loop to fill the buffer. */
        const auto loopSize = static_cast<size_t>(loopEnd - loopStart);
        size_t samplesLoaded{remaining};
        while(const size_t toFill{minz(samplesToLoad - samplesLoaded, loopSize)})
        {
            LoadSamples(voiceSamples, samplesLoaded, buffer->mSamples, loopStart, sampleType,
                sampleChannels, srcStep, toFill);
            samplesLoaded += toFill;
        }
    }
}
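/* When the buffer runs out before the request is satisfied (the non-looping
 * case), each channel is padded by repeating its last loaded sample, giving
 * the resampler a flat tail to read past the end of the data instead of an
 * abrupt discontinuity. The looping case instead reloads the loop region
 * until the request is filled.
 */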
void LoadBufferCallback(VoiceBufferItem *buffer, const size_t numCallbackSamples,
    const FmtType sampleType, const FmtChannels sampleChannels, const size_t srcStep,
    const size_t samplesToLoad, const al::span<float*> voiceSamples)
{
    /* Load what's left to play from the buffer */
    const size_t remaining{minz(samplesToLoad, numCallbackSamples)};
    LoadSamples(voiceSamples, 0, buffer->mSamples, 0, sampleType, sampleChannels, srcStep,
        remaining);

    if(const size_t toFill{samplesToLoad - remaining})
    {
        for(auto *chanbuffer : voiceSamples)
        {
            auto srcsamples = chanbuffer + remaining - 1;
            std::fill_n(srcsamples + 1, toFill, *srcsamples);
        }
    }
}
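/* Callback buffers are always read from offset 0 because Voice::mix shifts
 * any unconsumed callback data down to the front of the buffer after each
 * pass; the same repeat-last-sample padding is applied when the callback
 * delivered fewer samples than requested.
 */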
void LoadBufferQueue(VoiceBufferItem *buffer, VoiceBufferItem *bufferLoopItem,
    size_t dataPosInt, const FmtType sampleType, const FmtChannels sampleChannels,
    const size_t srcStep, const size_t samplesToLoad, const al::span<float*> voiceSamples)
{
    /* Crawl the buffer queue to fill in the temp buffer */
    size_t samplesLoaded{0};
    while(buffer && samplesLoaded != samplesToLoad)
    {
        if(dataPosInt >= buffer->mSampleLen)
        {
            dataPosInt -= buffer->mSampleLen;
            buffer = buffer->mNext.load(std::memory_order_acquire);
            if(!buffer) buffer = bufferLoopItem;
            continue;
        }

        const size_t remaining{minz(samplesToLoad-samplesLoaded, buffer->mSampleLen-dataPosInt)};
        LoadSamples(voiceSamples, samplesLoaded, buffer->mSamples, dataPosInt, sampleType,
            sampleChannels, srcStep, remaining);

        samplesLoaded += remaining;
        if(samplesLoaded == samplesToLoad)
            break;

        dataPosInt = 0;
        buffer = buffer->mNext.load(std::memory_order_acquire);
        if(!buffer) buffer = bufferLoopItem;
    }
    if(const size_t toFill{samplesToLoad - samplesLoaded})
    {
        for(auto *chanbuffer : voiceSamples)
        {
            auto srcsamples = chanbuffer + samplesLoaded - 1;
            std::fill_n(srcsamples + 1, toFill, *srcsamples);
        }
    }
}
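/* A queue can come up short (e.g. a streaming underrun), so the same tail
 * padding as the static and callback loaders is applied after walking the
 * list; bufferLoopItem lets a looping queue wrap back to its start instead of
 * ending.
 */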
void DoHrtfMix(const float *samples, const uint DstBufferSize, DirectParams &parms,
    const float TargetGain, const uint Counter, uint OutPos, const bool IsPlaying,
    DeviceBase *Device)
{
    const uint IrSize{Device->mIrSize};
    auto &HrtfSamples = Device->HrtfSourceData;
    auto &AccumSamples = Device->HrtfAccumData;

    /* Copy the HRTF history and new input samples into a temp buffer. */
    auto src_iter = std::copy(parms.Hrtf.History.begin(), parms.Hrtf.History.end(),
        std::begin(HrtfSamples));
    std::copy_n(samples, DstBufferSize, src_iter);
    /* Copy the last used samples back into the history buffer for later. */
    if(likely(IsPlaying))
        std::copy_n(std::begin(HrtfSamples) + DstBufferSize, parms.Hrtf.History.size(),
            parms.Hrtf.History.begin());

    /* If fading and this is the first mixing pass, fade between the IRs. */
    uint fademix{0u};
    if(Counter && OutPos == 0)
    {
        fademix = minu(DstBufferSize, Counter);

        float gain{TargetGain};

        /* The new coefficients need to fade in completely since they're
         * replacing the old ones. To keep the gain fading consistent,
         * interpolate between the old and new target gains given how much of
         * the fade time this mix handles.
         */
        if(Counter > fademix)
        {
            const float a{static_cast<float>(fademix) / static_cast<float>(Counter)};
            gain = lerpf(parms.Hrtf.Old.Gain, TargetGain, a);
        }

        MixHrtfFilter hrtfparams{
            parms.Hrtf.Target.Coeffs,
            parms.Hrtf.Target.Delay,
            0.0f, gain / static_cast<float>(fademix)};
        MixHrtfBlendSamples(HrtfSamples, AccumSamples+OutPos, IrSize, &parms.Hrtf.Old, &hrtfparams,
            fademix);

        /* Update the old parameters with the result. */
        parms.Hrtf.Old = parms.Hrtf.Target;
        parms.Hrtf.Old.Gain = gain;
        OutPos += fademix;
    }

    if(fademix < DstBufferSize)
    {
        const uint todo{DstBufferSize - fademix};
        float gain{TargetGain};

        /* Interpolate the target gain if the gain fading lasts longer than
         * this mix.
         */
        if(Counter > DstBufferSize)
        {
            const float a{static_cast<float>(todo) / static_cast<float>(Counter-fademix)};
            gain = lerpf(parms.Hrtf.Old.Gain, TargetGain, a);
        }

        MixHrtfFilter hrtfparams{
            parms.Hrtf.Target.Coeffs,
            parms.Hrtf.Target.Delay,
            parms.Hrtf.Old.Gain,
            (gain - parms.Hrtf.Old.Gain) / static_cast<float>(todo)};
        MixHrtfSamples(HrtfSamples+fademix, AccumSamples+OutPos, IrSize, &hrtfparams, todo);

        /* Store the now-current gain for next time. */
        parms.Hrtf.Old.Gain = gain;
    }
}
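/* DoHrtfMix is effectively two passes over the same output range: an optional
 * cross-fade of up to Counter samples that blends the previous HRIR
 * (parms.Hrtf.Old) into the new target HRIR, followed by a normal HRTF mix of
 * whatever remains using the (possibly interpolated) target gain. The blend
 * only happens on the first pass of a mix (OutPos == 0) while a parameter
 * fade is active.
 */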
void DoNfcMix(const al::span<const float> samples, FloatBufferLine *OutBuffer, DirectParams &parms,
    const float *TargetGains, const uint Counter, const uint OutPos, DeviceBase *Device)
{
    using FilterProc = void (NfcFilter::*)(const al::span<const float>, float*);
    static constexpr FilterProc NfcProcess[MaxAmbiOrder+1]{
        nullptr, &NfcFilter::process1, &NfcFilter::process2, &NfcFilter::process3};

    float *CurrentGains{parms.Gains.Current.data()};
    MixSamples(samples, {OutBuffer, 1u}, CurrentGains, TargetGains, Counter, OutPos);
    ++OutBuffer;
    ++CurrentGains;
    ++TargetGains;

    const al::span<float> nfcsamples{Device->NfcSampleData, samples.size()};
    size_t order{1};
    while(const size_t chancount{Device->NumChannelsPerOrder[order]})
    {
        (parms.NFCtrlFilter.*NfcProcess[order])(samples, nfcsamples.data());
        MixSamples(nfcsamples, {OutBuffer, chancount}, CurrentGains, TargetGains, Counter, OutPos);
        OutBuffer += chancount;
        CurrentGains += chancount;
        TargetGains += chancount;
        if(++order == MaxAmbiOrder+1)
            break;
    }
}
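/* The zeroth-order (W) channel is mixed unfiltered, then each higher
 * ambisonic order gets its own near-field compensation filter pass before
 * mixing, with Device->NumChannelsPerOrder giving how many output channels
 * share each order's filtered signal.
 */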
} // namespace
void Voice::mix(const State vstate, ContextBase *Context, const uint SamplesToDo)
{
    static constexpr std::array<float,MAX_OUTPUT_CHANNELS> SilentTarget{};

    ASSUME(SamplesToDo > 0);

    /* Get voice info */
    uint DataPosInt{mPosition.load(std::memory_order_relaxed)};
    uint DataPosFrac{mPositionFrac.load(std::memory_order_relaxed)};
    VoiceBufferItem *BufferListItem{mCurrentBuffer.load(std::memory_order_relaxed)};
    VoiceBufferItem *BufferLoopItem{mLoopBuffer.load(std::memory_order_relaxed)};
    const uint increment{mStep};
    if UNLIKELY(increment < 1)
    {
        /* If the voice is supposed to be stopping but can't be mixed, just
         * stop it before bailing.
         */
        if(vstate == Stopping)
            mPlayState.store(Stopped, std::memory_order_release);
        return;
    }

    DeviceBase *Device{Context->mDevice};
    const uint NumSends{Device->NumAuxSends};

    ResamplerFunc Resample{(increment == MixerFracOne && DataPosFrac == 0) ?
        Resample_<CopyTag,CTag> : mResampler};

    uint Counter{mFlags.test(VoiceIsFading) ? SamplesToDo : 0};
    if(!Counter)
    {
        /* No fading, just overwrite the old/current params. */
        for(auto &chandata : mChans)
        {
            {
                DirectParams &parms = chandata.mDryParams;
                if(!mFlags.test(VoiceHasHrtf))
                    parms.Gains.Current = parms.Gains.Target;
                else
                    parms.Hrtf.Old = parms.Hrtf.Target;
            }
            for(uint send{0};send < NumSends;++send)
            {
                if(mSend[send].Buffer.empty())
                    continue;

                SendParams &parms = chandata.mWetParams[send];
                parms.Gains.Current = parms.Gains.Target;
            }
        }
    }
    else if UNLIKELY(!BufferListItem)
        Counter = std::min(Counter, 64u);

    std::array<float*,DeviceBase::MixerChannelsMax> SamplePointers;
    const al::span<float*> MixingSamples{SamplePointers.data(), mChans.size()};
    auto offset_bufferline = [](DeviceBase::MixerBufferLine &bufline) noexcept -> float*
    { return bufline.data() + MaxResamplerEdge; };
    std::transform(Device->mSampleData.end() - mChans.size(), Device->mSampleData.end(),
        MixingSamples.begin(), offset_bufferline);

    const uint PostPadding{MaxResamplerEdge + mDecoderPadding};
    uint buffers_done{0u};
    uint OutPos{0u};
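    /* Positions use fixed-point arithmetic: DataPosInt counts whole source
     * frames, DataPosFrac holds the fractional part in MixerFracBits bits,
     * and `increment` (mStep) is the per-output-sample advance in the same
     * fixed-point scale. An increment of exactly MixerFracOne with no
     * fractional offset is a 1:1 rate, which is why the plain copy
     * "resampler" was selected above.
     */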
    do {
        /* Figure out how many buffer samples will be needed */
        uint DstBufferSize{SamplesToDo - OutPos};
        uint SrcBufferSize;

        if(increment <= MixerFracOne)
        {
            /* Calculate the last written dst sample pos. */
            uint64_t DataSize64{DstBufferSize - 1};
            /* Calculate the last read src sample pos. */
            DataSize64 = (DataSize64*increment + DataPosFrac) >> MixerFracBits;
            /* +1 to get the src sample count, include padding. */
            DataSize64 += 1 + PostPadding;

            /* Result is guaranteed to be <= BufferLineSize+PostPadding since
             * we won't use more src samples than dst samples+padding.
             */
            SrcBufferSize = static_cast<uint>(DataSize64);
        }
        else
        {
            uint64_t DataSize64{DstBufferSize};
            /* Calculate the end src sample pos, include padding. */
            DataSize64 = (DataSize64*increment + DataPosFrac) >> MixerFracBits;
            DataSize64 += PostPadding;

            if(DataSize64 <= DeviceBase::MixerLineSize - MaxResamplerEdge)
                SrcBufferSize = static_cast<uint>(DataSize64);
            else
            {
                /* If the source size got saturated, we can't fill the desired
                 * dst size. Figure out how many samples we can actually mix.
                 */
                SrcBufferSize = DeviceBase::MixerLineSize - MaxResamplerEdge;

                DataSize64 = SrcBufferSize - PostPadding;
                DataSize64 = ((DataSize64<<MixerFracBits) - DataPosFrac) / increment;
                if(DataSize64 < DstBufferSize)
                {
                    /* Some mixers require being 16-byte aligned, so also limit
                     * to a multiple of 4 samples to maintain alignment.
                     */
                    DstBufferSize = static_cast<uint>(DataSize64) & ~3u;
                    /* If the voice is stopping, only one mixing iteration will
                     * be done, so ensure it fades out completely this mix.
                     */
                    if(unlikely(vstate == Stopping))
                        Counter = std::min(Counter, DstBufferSize);
                }
                ASSUME(DstBufferSize > 0);
            }
        }

        if(unlikely(!BufferListItem))
        {
            const size_t srcOffset{(increment*DstBufferSize + DataPosFrac)>>MixerFracBits};
            auto prevSamples = mPrevSamples.data();
            SrcBufferSize = SrcBufferSize - PostPadding + MaxResamplerEdge;
            for(auto *chanbuffer : MixingSamples)
            {
                auto srcend = std::copy_n(prevSamples->data(), MaxResamplerPadding,
                    chanbuffer-MaxResamplerEdge);

                /* When loading from a voice that ended prematurely, only take
                 * the samples that get closest to 0 amplitude. This helps
                 * certain sounds fade out better.
                 */
                auto abs_lt = [](const float lhs, const float rhs) noexcept -> bool
                { return std::abs(lhs) < std::abs(rhs); };
                auto srciter = std::min_element(chanbuffer, srcend, abs_lt);

                std::fill(srciter+1, chanbuffer + SrcBufferSize, *srciter);

                std::copy_n(chanbuffer-MaxResamplerEdge+srcOffset, prevSamples->size(),
                    prevSamples->data());
                ++prevSamples;
            }
        }
        else
        {
            auto prevSamples = mPrevSamples.data();
            for(auto *chanbuffer : MixingSamples)
            {
                std::copy_n(prevSamples->data(), MaxResamplerEdge, chanbuffer-MaxResamplerEdge);
                ++prevSamples;
            }

            if(mFlags.test(VoiceIsStatic))
                LoadBufferStatic(BufferListItem, BufferLoopItem, DataPosInt, mFmtType,
                    mFmtChannels, mFrameStep, SrcBufferSize, MixingSamples);
            else if(mFlags.test(VoiceIsCallback))
            {
                if(!mFlags.test(VoiceCallbackStopped) && SrcBufferSize > mNumCallbackSamples)
                {
                    const size_t byteOffset{mNumCallbackSamples*mFrameSize};
                    const size_t needBytes{SrcBufferSize*mFrameSize - byteOffset};

                    const int gotBytes{BufferListItem->mCallback(BufferListItem->mUserData,
                        &BufferListItem->mSamples[byteOffset], static_cast<int>(needBytes))};
                    if(gotBytes < 0)
                        mFlags.set(VoiceCallbackStopped);
                    else if(static_cast<uint>(gotBytes) < needBytes)
                    {
                        mFlags.set(VoiceCallbackStopped);
                        mNumCallbackSamples += static_cast<uint>(gotBytes) / mFrameSize;
                    }
                    else
                        mNumCallbackSamples = SrcBufferSize;
                }
                LoadBufferCallback(BufferListItem, mNumCallbackSamples, mFmtType, mFmtChannels,
                    mFrameStep, SrcBufferSize, MixingSamples);
            }
            else
                LoadBufferQueue(BufferListItem, BufferLoopItem, DataPosInt, mFmtType, mFmtChannels,
                    mFrameStep, SrcBufferSize, MixingSamples);

            const size_t srcOffset{(increment*DstBufferSize + DataPosFrac)>>MixerFracBits};
            if(mDecoder)
            {
                SrcBufferSize = SrcBufferSize - PostPadding + MaxResamplerEdge;
                mDecoder->decode(MixingSamples, SrcBufferSize,
                    likely(vstate == Playing) ? srcOffset : 0);
            }

            /* Store the last source samples used for next time. */
            if(likely(vstate == Playing))
            {
                prevSamples = mPrevSamples.data();
                for(auto *chanbuffer : MixingSamples)
                {
                    std::copy_n(chanbuffer-MaxResamplerEdge+srcOffset, prevSamples->size(),
                        prevSamples->data());
                    ++prevSamples;
                }
            }
        }
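        /* At this point each channel's mixing line holds MaxResamplerEdge
         * history samples followed by SrcBufferSize freshly loaded (and, for
         * UHJ formats, decoded) source samples, which is enough for the
         * resampler below to produce DstBufferSize outputs plus its edge
         * padding.
         */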
        auto voiceSamples = MixingSamples.begin();
        for(auto &chandata : mChans)
        {
            /* Resample, then apply ambisonic upsampling as needed. */
            float *ResampledData{Resample(&mResampleState, *voiceSamples, DataPosFrac, increment,
                {Device->ResampledData, DstBufferSize})};
            ++voiceSamples;
            if(mFlags.test(VoiceIsAmbisonic))
                chandata.mAmbiSplitter.processScale({ResampledData, DstBufferSize},
                    chandata.mAmbiHFScale, chandata.mAmbiLFScale);

            /* Now filter and mix to the appropriate outputs. */
            const al::span<float,BufferLineSize> FilterBuf{Device->FilteredData};
            {
                DirectParams &parms = chandata.mDryParams;
                const float *samples{DoFilters(parms.LowPass, parms.HighPass, FilterBuf.data(),
                    {ResampledData, DstBufferSize}, mDirect.FilterType)};

                if(mFlags.test(VoiceHasHrtf))
                {
                    const float TargetGain{parms.Hrtf.Target.Gain * likely(vstate == Playing)};
                    DoHrtfMix(samples, DstBufferSize, parms, TargetGain, Counter, OutPos,
                        (vstate == Playing), Device);
                }
                else
                {
                    const float *TargetGains{likely(vstate == Playing) ? parms.Gains.Target.data()
                        : SilentTarget.data()};
                    if(mFlags.test(VoiceHasNfc))
                        DoNfcMix({samples, DstBufferSize}, mDirect.Buffer.data(), parms,
                            TargetGains, Counter, OutPos, Device);
                    else
                        MixSamples({samples, DstBufferSize}, mDirect.Buffer,
                            parms.Gains.Current.data(), TargetGains, Counter, OutPos);
                }
            }

            for(uint send{0};send < NumSends;++send)
            {
                if(mSend[send].Buffer.empty())
                    continue;

                SendParams &parms = chandata.mWetParams[send];
                const float *samples{DoFilters(parms.LowPass, parms.HighPass, FilterBuf.data(),
                    {ResampledData, DstBufferSize}, mSend[send].FilterType)};

                const float *TargetGains{likely(vstate == Playing) ? parms.Gains.Target.data()
                    : SilentTarget.data()};
                MixSamples({samples, DstBufferSize}, mSend[send].Buffer,
                    parms.Gains.Current.data(), TargetGains, Counter, OutPos);
            }
        }

        /* If the voice is stopping, we're now done. */
        if(unlikely(vstate == Stopping))
            break;

        /* Update positions */
        DataPosFrac += increment*DstBufferSize;
        const uint SrcSamplesDone{DataPosFrac>>MixerFracBits};
        DataPosInt += SrcSamplesDone;
        DataPosFrac &= MixerFracMask;

        OutPos += DstBufferSize;
        Counter = maxu(DstBufferSize, Counter) - DstBufferSize;

        if(unlikely(!BufferListItem))
        {
            /* Do nothing extra when there's no buffers. */
        }
        else if(mFlags.test(VoiceIsStatic))
        {
            if(BufferLoopItem)
            {
                /* Handle looping static source */
                const uint LoopStart{BufferListItem->mLoopStart};
                const uint LoopEnd{BufferListItem->mLoopEnd};
                if(DataPosInt >= LoopEnd)
                {
                    assert(LoopEnd > LoopStart);
                    DataPosInt = ((DataPosInt-LoopStart)%(LoopEnd-LoopStart)) + LoopStart;
                }
            }
            else
            {
                /* Handle non-looping static source */
                if(DataPosInt >= BufferListItem->mSampleLen)
                {
                    BufferListItem = nullptr;
                    break;
                }
            }
        }
        else if(mFlags.test(VoiceIsCallback))
        {
            /* Handle callback buffer source */
            if(SrcSamplesDone < mNumCallbackSamples)
            {
                const size_t byteOffset{SrcSamplesDone*mFrameSize};
                const size_t byteEnd{mNumCallbackSamples*mFrameSize};
                al::byte *data{BufferListItem->mSamples};
                std::copy(data+byteOffset, data+byteEnd, data);
                mNumCallbackSamples -= SrcSamplesDone;
            }
            else
            {
                BufferListItem = nullptr;
                mNumCallbackSamples = 0;
            }
        }
        else
        {
            /* Handle streaming source */
            do {
                if(BufferListItem->mSampleLen > DataPosInt)
                    break;

                DataPosInt -= BufferListItem->mSampleLen;

                ++buffers_done;
                BufferListItem = BufferListItem->mNext.load(std::memory_order_relaxed);
                if(!BufferListItem) BufferListItem = BufferLoopItem;
            } while(BufferListItem);
        }
    } while(OutPos < SamplesToDo);
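    /* Mark the voice as fading from now on, so any later parameter change is
     * interpolated over the next mix (Counter > 0 above) instead of being
     * applied abruptly.
     */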
    mFlags.set(VoiceIsFading);

    /* Don't update positions and buffers if we were stopping. */
    if(unlikely(vstate == Stopping))
    {
        mPlayState.store(Stopped, std::memory_order_release);
        return;
    }

    /* Capture the source ID in case it's reset for stopping. */
    const uint SourceID{mSourceID.load(std::memory_order_relaxed)};

    /* Update voice info */
    mPosition.store(DataPosInt, std::memory_order_relaxed);
    mPositionFrac.store(DataPosFrac, std::memory_order_relaxed);
    mCurrentBuffer.store(BufferListItem, std::memory_order_relaxed);
    if(!BufferListItem)
    {
        mLoopBuffer.store(nullptr, std::memory_order_relaxed);
        mSourceID.store(0u, std::memory_order_relaxed);
    }
    std::atomic_thread_fence(std::memory_order_release);

    /* Send any events now, after the position/buffer info was updated. */
    const uint enabledevt{Context->mEnabledEvts.load(std::memory_order_acquire)};
    if(buffers_done > 0 && (enabledevt&AsyncEvent::BufferCompleted))
    {
        RingBuffer *ring{Context->mAsyncEvents.get()};
        auto evt_vec = ring->getWriteVector();
        if(evt_vec.first.len > 0)
        {
            AsyncEvent *evt{al::construct_at(reinterpret_cast<AsyncEvent*>(evt_vec.first.buf),
                AsyncEvent::BufferCompleted)};
            evt->u.bufcomp.id = SourceID;
            evt->u.bufcomp.count = buffers_done;
            ring->writeAdvance(1);
        }
    }

    if(!BufferListItem)
    {
        /* If the voice just ended, set it to Stopping so the next render
         * ensures any residual noise fades to 0 amplitude.
         */
        mPlayState.store(Stopping, std::memory_order_release);
        if((enabledevt&AsyncEvent::SourceStateChange))
            SendSourceStoppedEvent(Context, SourceID);
    }
}
void Voice::prepare(DeviceBase *device)
{
    /* Even if storing really high order ambisonics, we only mix channels for
     * orders up to the device order. The rest are simply dropped.
     */
    uint num_channels{(mFmtChannels == FmtUHJ2 || mFmtChannels == FmtSuperStereo) ? 3 :
        ChannelsFromFmt(mFmtChannels, minu(mAmbiOrder, device->mAmbiOrder))};
    if(unlikely(num_channels > device->mSampleData.size()))
    {
        ERR("Unexpected channel count: %u (limit: %zu, %d:%d)\n", num_channels,
            device->mSampleData.size(), mFmtChannels, mAmbiOrder);
        num_channels = static_cast<uint>(device->mSampleData.size());
    }
    if(mChans.capacity() > 2 && num_channels < mChans.capacity())
    {
        decltype(mChans){}.swap(mChans);
        decltype(mPrevSamples){}.swap(mPrevSamples);
    }
    mChans.reserve(maxu(2, num_channels));
    mChans.resize(num_channels);
    mPrevSamples.reserve(maxu(2, num_channels));
    mPrevSamples.resize(num_channels);

    if(mFmtChannels == FmtSuperStereo)
    {
        mDecoder = std::make_unique<UhjStereoDecoder>();
        mDecoderPadding = UhjStereoDecoder::sFilterDelay;
    }
    else if(IsUHJ(mFmtChannels))
    {
        mDecoder = std::make_unique<UhjDecoder>();
        mDecoderPadding = UhjDecoder::sFilterDelay;
    }
    else
    {
        mDecoder = nullptr;
        mDecoderPadding = 0;
    }
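    /* The decoder's filter delay is recorded in mDecoderPadding; Voice::mix
     * adds it to the per-pass source padding (PostPadding) so the UHJ decoder
     * always has enough look-ahead samples to work with.
     */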
    /* Clear the stepping value explicitly so the mixer knows not to mix this
     * until the update gets applied.
     */
    mStep = 0;

    /* Make sure the sample history is cleared. */
    std::fill(mPrevSamples.begin(), mPrevSamples.end(), HistoryLine{});

    /* Don't need to set the VoiceIsAmbisonic flag if the device is not higher
     * order than the voice. No HF scaling is necessary to mix it.
     */
    if(mAmbiOrder && device->mAmbiOrder > mAmbiOrder)
    {
        const uint8_t *OrderFromChan{Is2DAmbisonic(mFmtChannels) ?
            AmbiIndex::OrderFrom2DChannel().data() : AmbiIndex::OrderFromChannel().data()};
        const auto scales = AmbiScale::GetHFOrderScales(mAmbiOrder, device->mAmbiOrder);

        const BandSplitter splitter{device->mXOverFreq / static_cast<float>(device->Frequency)};
        for(auto &chandata : mChans)
        {
            chandata.mAmbiHFScale = scales[*(OrderFromChan++)];
            chandata.mAmbiLFScale = 1.0f;
            chandata.mAmbiSplitter = splitter;
            chandata.mDryParams = DirectParams{};
            chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
            std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
        }
        /* 2-channel UHJ needs different shelf filters. However, we can't just
         * use different shelf filters after mixing it and with any old speaker
         * setup the user has. To make this work, we apply the expected shelf
         * filters for decoding UHJ2 to quad (only needs LF scaling), and act
         * as if those 4 quad channels are encoded right back onto first-order
         * B-Format, which then upsamples to higher order as normal (only needs
         * HF scaling).
         *
         * This isn't perfect, but without an entirely separate and limited
         * UHJ2 path, it's better than nothing.
         */
        if(mFmtChannels == FmtUHJ2)
        {
            mChans[0].mAmbiLFScale = UhjDecoder::sWLFScale;
            mChans[1].mAmbiLFScale = UhjDecoder::sXYLFScale;
            mChans[2].mAmbiLFScale = UhjDecoder::sXYLFScale;
        }
        mFlags.set(VoiceIsAmbisonic);
    }
    else if(mFmtChannels == FmtUHJ2 && !device->mUhjEncoder)
    {
        /* 2-channel UHJ with first-order output also needs the shelf filter
         * correction applied, except with UHJ output (UHJ2->B-Format->UHJ2 is
         * identity, so don't mess with it).
         */
        const BandSplitter splitter{device->mXOverFreq / static_cast<float>(device->Frequency)};
        for(auto &chandata : mChans)
        {
            chandata.mAmbiHFScale = 1.0f;
            chandata.mAmbiLFScale = 1.0f;
            chandata.mAmbiSplitter = splitter;
            chandata.mDryParams = DirectParams{};
            chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
            std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
        }
        mChans[0].mAmbiLFScale = UhjDecoder::sWLFScale;
        mChans[1].mAmbiLFScale = UhjDecoder::sXYLFScale;
        mChans[2].mAmbiLFScale = UhjDecoder::sXYLFScale;
        mFlags.set(VoiceIsAmbisonic);
    }
    else
    {
        for(auto &chandata : mChans)
        {
            chandata.mDryParams = DirectParams{};
            chandata.mDryParams.NFCtrlFilter = device->mNFCtrlFilter;
            std::fill_n(chandata.mWetParams.begin(), device->NumAuxSends, SendParams{});
        }
        mFlags.reset(VoiceIsAmbisonic);
    }
}