🛠️🐜 Antkeeper superbuild with dependencies included https://antkeeper.com

/**
 * This file is part of the OpenAL Soft cross platform audio library
 *
 * Copyright (C) 2019 by Anis A. Hireche
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * * Neither the name of Spherical-Harmonic-Transform nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <cstdlib>
#include <functional>
#include <iterator>

#include "alc/effects/base.h"
#include "almalloc.h"
#include "alnumbers.h"
#include "alnumeric.h"
#include "alspan.h"
#include "core/ambidefs.h"
#include "core/bufferline.h"
#include "core/context.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/mixer.h"
#include "intrusive_ptr.h"

namespace {

using uint = unsigned int;

#define MAX_UPDATE_SAMPLES 256

#define NUM_FORMANTS 4
#define NUM_FILTERS  2
#define Q_FACTOR     5.0f

#define VOWEL_A_INDEX 0
#define VOWEL_B_INDEX 1

#define WAVEFORM_FRACBITS 24
#define WAVEFORM_FRACONE  (1<<WAVEFORM_FRACBITS)
#define WAVEFORM_FRACMASK (WAVEFORM_FRACONE-1)
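
/* The LFO phase is kept as a 24-bit fixed-point fraction of one waveform
 * cycle: WAVEFORM_FRACONE represents a full cycle and the phase wraps via
 * WAVEFORM_FRACMASK. Each waveform function below maps that phase to a blend
 * factor in [0,1], where 0 selects vowel A and 1 selects vowel B; Half() is
 * the constant equal mix used when the rate rounds down to a zero step.
 */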
inline float Sin(uint index)
{
    constexpr float scale{al::numbers::pi_v<float>*2.0f / WAVEFORM_FRACONE};
    return std::sin(static_cast<float>(index) * scale)*0.5f + 0.5f;
}

inline float Saw(uint index)
{ return static_cast<float>(index) / float{WAVEFORM_FRACONE}; }

inline float Triangle(uint index)
{ return std::fabs(static_cast<float>(index)*(2.0f/WAVEFORM_FRACONE) - 1.0f); }

inline float Half(uint) { return 0.5f; }
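
/* Fill dst with 'todo' LFO samples, advancing the phase by 'step' per sample
 * and wrapping it to the fixed-point range before evaluating the waveform.
 */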
template<float (&func)(uint)>
void Oscillate(float *RESTRICT dst, uint index, const uint step, size_t todo)
{
    for(size_t i{0u};i < todo;i++)
    {
        index += step;
        index &= WAVEFORM_FRACMASK;
        dst[i] = func(index);
    }
}
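
/* A single formant band: one band-pass section of a state variable filter.
 * The coefficient is g = tan(pi * f0/fs) for a normalized center frequency
 * f0norm = f0/fs, the Q is fixed at Q_FACTOR, and process() scales the
 * band-pass output by the formant gain and accumulates it into samplesOut,
 * so several formants can be summed into the same buffer.
 */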
struct FormantFilter
{
    float mCoeff{0.0f};
    float mGain{1.0f};
    float mS1{0.0f};
    float mS2{0.0f};

    FormantFilter() = default;
    FormantFilter(float f0norm, float gain)
      : mCoeff{std::tan(al::numbers::pi_v<float> * f0norm)}, mGain{gain}
    { }

    inline void process(const float *samplesIn, float *samplesOut, const size_t numInput)
    {
        /* A state variable filter from a topology-preserving transform.
         * Based on a talk given by Ivan Cohen: https://www.youtube.com/watch?v=esjHXGPyrhg
         */
        const float g{mCoeff};
        const float gain{mGain};
        const float h{1.0f / (1.0f + (g/Q_FACTOR) + (g*g))};
        float s1{mS1};
        float s2{mS2};

        for(size_t i{0u};i < numInput;i++)
        {
            const float H{(samplesIn[i] - (1.0f/Q_FACTOR + g)*s1 - s2)*h};
            const float B{g*H + s1};
            const float L{g*B + s2};

            s1 = g*H + B;
            s2 = g*B + L;

            // Apply peak and accumulate samples.
            samplesOut[i] += B * gain;
        }
        mS1 = s1;
        mS2 = s2;
    }

    inline void clear()
    {
        mS1 = 0.0f;
        mS2 = 0.0f;
    }
};
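
/* Per-effect state: each ambisonic channel gets two banks of NUM_FORMANTS
 * formant filters (the A and B vowels) plus current/target panning gains,
 * while the LFO phase, step, and per-block scratch buffers are shared.
 */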
struct VmorpherState final : public EffectState {
    struct {
        /* Effect parameters */
        FormantFilter Formants[NUM_FILTERS][NUM_FORMANTS];

        /* Effect gains for each channel */
        float CurrentGains[MAX_OUTPUT_CHANNELS]{};
        float TargetGains[MAX_OUTPUT_CHANNELS]{};
    } mChans[MaxAmbiChannels];

    void (*mGetSamples)(float*RESTRICT, uint, const uint, size_t){};

    uint mIndex{0};
    uint mStep{1};

    /* Effects buffers */
    alignas(16) float mSampleBufferA[MAX_UPDATE_SAMPLES]{};
    alignas(16) float mSampleBufferB[MAX_UPDATE_SAMPLES]{};
    alignas(16) float mLfo[MAX_UPDATE_SAMPLES]{};

    void deviceUpdate(const DeviceBase *device, const Buffer &buffer) override;
    void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
        const EffectTarget target) override;
    void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
        const al::span<FloatBufferLine> samplesOut) override;

    static std::array<FormantFilter,4> getFiltersByPhoneme(VMorpherPhenome phoneme,
        float frequency, float pitch);

    DEF_NEWDEL(VmorpherState)
};
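
/* Build the four formant filters for a phoneme. Each table frequency (in Hz)
 * is scaled by the coarse-tune pitch factor and normalized by the device
 * sample rate before becoming a filter coefficient, and the gains are the
 * linear equivalents of the dB values noted beside them. For example, at a
 * 44100Hz sample rate the 800Hz first formant of 'A' gives f0norm = 800/44100,
 * roughly 0.018, before the pitch scale. Unhandled phonemes return
 * default-constructed filters.
 */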
std::array<FormantFilter,4> VmorpherState::getFiltersByPhoneme(VMorpherPhenome phoneme,
    float frequency, float pitch)
{
    /* Using soprano formant set of values to
     * better match mid-range frequency space.
     *
     * See: https://www.classes.cs.uchicago.edu/archive/1999/spring/CS295/Computing_Resources/Csound/CsManual3.48b1.HTML/Appendices/table3.html
     */
    switch(phoneme)
    {
    case VMorpherPhenome::A:
        return {{
            {( 800 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f,   0 / 20.0f); */
            {(1150 * pitch) / frequency, 0.501187f}, /* std::pow(10.0f,  -6 / 20.0f); */
            {(2900 * pitch) / frequency, 0.025118f}, /* std::pow(10.0f, -32 / 20.0f); */
            {(3900 * pitch) / frequency, 0.100000f}  /* std::pow(10.0f, -20 / 20.0f); */
        }};
    case VMorpherPhenome::E:
        return {{
            {( 350 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f,   0 / 20.0f); */
            {(2000 * pitch) / frequency, 0.100000f}, /* std::pow(10.0f, -20 / 20.0f); */
            {(2800 * pitch) / frequency, 0.177827f}, /* std::pow(10.0f, -15 / 20.0f); */
            {(3600 * pitch) / frequency, 0.009999f}  /* std::pow(10.0f, -40 / 20.0f); */
        }};
    case VMorpherPhenome::I:
        return {{
            {( 270 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f,   0 / 20.0f); */
            {(2140 * pitch) / frequency, 0.251188f}, /* std::pow(10.0f, -12 / 20.0f); */
            {(2950 * pitch) / frequency, 0.050118f}, /* std::pow(10.0f, -26 / 20.0f); */
            {(3900 * pitch) / frequency, 0.050118f}  /* std::pow(10.0f, -26 / 20.0f); */
        }};
    case VMorpherPhenome::O:
        return {{
            {( 450 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f,   0 / 20.0f); */
            {( 800 * pitch) / frequency, 0.281838f}, /* std::pow(10.0f, -11 / 20.0f); */
            {(2830 * pitch) / frequency, 0.079432f}, /* std::pow(10.0f, -22 / 20.0f); */
            {(3800 * pitch) / frequency, 0.079432f}  /* std::pow(10.0f, -22 / 20.0f); */
        }};
    case VMorpherPhenome::U:
        return {{
            {( 325 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f,   0 / 20.0f); */
            {( 700 * pitch) / frequency, 0.158489f}, /* std::pow(10.0f, -16 / 20.0f); */
            {(2700 * pitch) / frequency, 0.017782f}, /* std::pow(10.0f, -35 / 20.0f); */
            {(3800 * pitch) / frequency, 0.009999f}  /* std::pow(10.0f, -40 / 20.0f); */
        }};
    default:
        break;
    }
    return {};
}
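
/* Reset the effect for a new device or configuration: clear the history of
 * every formant filter in both vowel banks and zero the current panning gains
 * so no stale state carries over.
 */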
void VmorpherState::deviceUpdate(const DeviceBase*, const Buffer&)
{
    for(auto &e : mChans)
    {
        std::for_each(std::begin(e.Formants[VOWEL_A_INDEX]), std::end(e.Formants[VOWEL_A_INDEX]),
            std::mem_fn(&FormantFilter::clear));
        std::for_each(std::begin(e.Formants[VOWEL_B_INDEX]), std::end(e.Formants[VOWEL_B_INDEX]),
            std::mem_fn(&FormantFilter::clear));
        std::fill(std::begin(e.CurrentGains), std::end(e.CurrentGains), 0.0f);
    }
}
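
/* Apply a property update: convert the LFO rate to a fixed-point phase step
 * (rate/sample-rate scaled by WAVEFORM_FRACONE; e.g. a 4Hz rate at 48kHz
 * gives a step of about 1398), pick the waveform generator, convert each
 * phoneme's coarse tuning in semitones to a pitch factor of 2^(semitones/12),
 * load the resulting formant coefficients into every wet channel, and set up
 * the target panning gains.
 */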
void VmorpherState::update(const ContextBase *context, const EffectSlot *slot,
    const EffectProps *props, const EffectTarget target)
{
    const DeviceBase *device{context->mDevice};
    const float frequency{static_cast<float>(device->Frequency)};

    const float step{props->Vmorpher.Rate / frequency};
    mStep = fastf2u(clampf(step*WAVEFORM_FRACONE, 0.0f, float{WAVEFORM_FRACONE-1}));

    if(mStep == 0)
        mGetSamples = Oscillate<Half>;
    else if(props->Vmorpher.Waveform == VMorpherWaveform::Sinusoid)
        mGetSamples = Oscillate<Sin>;
    else if(props->Vmorpher.Waveform == VMorpherWaveform::Triangle)
        mGetSamples = Oscillate<Triangle>;
    else /*if(props->Vmorpher.Waveform == VMorpherWaveform::Sawtooth)*/
        mGetSamples = Oscillate<Saw>;

    const float pitchA{std::pow(2.0f,
        static_cast<float>(props->Vmorpher.PhonemeACoarseTuning) / 12.0f)};
    const float pitchB{std::pow(2.0f,
        static_cast<float>(props->Vmorpher.PhonemeBCoarseTuning) / 12.0f)};

    auto vowelA = getFiltersByPhoneme(props->Vmorpher.PhonemeA, frequency, pitchA);
    auto vowelB = getFiltersByPhoneme(props->Vmorpher.PhonemeB, frequency, pitchB);

    /* Copy the filter coefficients to the input channels. */
    for(size_t i{0u};i < slot->Wet.Buffer.size();++i)
    {
        std::copy(vowelA.begin(), vowelA.end(), std::begin(mChans[i].Formants[VOWEL_A_INDEX]));
        std::copy(vowelB.begin(), vowelB.end(), std::begin(mChans[i].Formants[VOWEL_B_INDEX]));
    }

    mOutTarget = target.Main->Buffer;
    auto set_gains = [slot,target](auto &chan, al::span<const float,MaxAmbiChannels> coeffs)
    { ComputePanGains(target.Main, coeffs.data(), slot->Gain, chan.TargetGains); };
    SetAmbiPanIdentity(std::begin(mChans), slot->Wet.Buffer.size(), set_gains);
}
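
/* Render the effect in blocks of at most MAX_UPDATE_SAMPLES: generate the LFO
 * for the block, run each input channel through both vowel filter banks into
 * the A and B scratch buffers, lerp between the two results per sample using
 * the LFO as the blend factor, then mix the blended signal to the output with
 * the channel's panning gains.
 */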
void VmorpherState::process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
    /* Following the EFX specification for a conformant implementation which describes
     * the effect as a pair of 4-band formant filters blended together using an LFO.
     */
    for(size_t base{0u};base < samplesToDo;)
    {
        const size_t td{minz(MAX_UPDATE_SAMPLES, samplesToDo-base)};

        mGetSamples(mLfo, mIndex, mStep, td);
        mIndex += static_cast<uint>(mStep * td);
        mIndex &= WAVEFORM_FRACMASK;

        auto chandata = std::begin(mChans);
        for(const auto &input : samplesIn)
        {
            auto& vowelA = chandata->Formants[VOWEL_A_INDEX];
            auto& vowelB = chandata->Formants[VOWEL_B_INDEX];

            /* Process first vowel. */
            std::fill_n(std::begin(mSampleBufferA), td, 0.0f);
            vowelA[0].process(&input[base], mSampleBufferA, td);
            vowelA[1].process(&input[base], mSampleBufferA, td);
            vowelA[2].process(&input[base], mSampleBufferA, td);
            vowelA[3].process(&input[base], mSampleBufferA, td);

            /* Process second vowel. */
            std::fill_n(std::begin(mSampleBufferB), td, 0.0f);
            vowelB[0].process(&input[base], mSampleBufferB, td);
            vowelB[1].process(&input[base], mSampleBufferB, td);
            vowelB[2].process(&input[base], mSampleBufferB, td);
            vowelB[3].process(&input[base], mSampleBufferB, td);

            alignas(16) float blended[MAX_UPDATE_SAMPLES];
            for(size_t i{0u};i < td;i++)
                blended[i] = lerpf(mSampleBufferA[i], mSampleBufferB[i], mLfo[i]);

            /* Now, mix the processed sound data to the output. */
            MixSamples({blended, td}, samplesOut, chandata->CurrentGains, chandata->TargetGains,
                samplesToDo-base, base);
            ++chandata;
        }

        base += td;
    }
}

struct VmorpherStateFactory final : public EffectStateFactory {
    al::intrusive_ptr<EffectState> create() override
    { return al::intrusive_ptr<EffectState>{new VmorpherState{}}; }
};

} // namespace
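
/* Returns the shared factory used to create vocal morpher effect states. */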
EffectStateFactory *VmorpherStateFactory_getFactory()
{
    static VmorpherStateFactory VmorpherFactory{};
    return &VmorpherFactory;
}