index.js 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337
// Native voice engine binding, selected per platform (e.g. discord_voice_win32.node).
const VoiceEngine = require('./discord_voice_'+process.platform+'.node');
const fs = require('fs');
const path = require('path');
const yargs = require('yargs');
// True when running inside the Electron renderer with the DiscordNative bridge
// available; decides whether settings/features/argv come from the bridge or
// from globals provided by the main process.
const isElectronRenderer =
  typeof window !== 'undefined' && window != null && window.DiscordNative && window.DiscordNative.isRenderer;
const appSettings = isElectronRenderer ? window.DiscordNative.settings : global.appSettings;
const features = isElectronRenderer ? window.DiscordNative.features : global.features;
// argv of the MAIN process (not this renderer); voice CLI flags are passed there.
const mainArgv = isElectronRenderer ? window.DiscordNative.processUtils.getMainArgvSync() : [];
// Resolve the per-module data directory used by the voice engine for its
// on-disk state/logs. Only resolvable in the Electron renderer when the
// fileManager bridge exposes getModuleDataPathSync; otherwise stays null
// (or undefined if the sync call throws) and the engine runs without it.
let dataDirectory;
try {
  dataDirectory =
    isElectronRenderer && window.DiscordNative.fileManager.getModuleDataPathSync
      ? path.join(window.DiscordNative.fileManager.getModuleDataPathSync(), 'discord_voice')
      : null;
} catch (e) {
  // Best-effort: a missing data directory is tolerated downstream.
  console.error('Failed to get data directory: ', e);
}
  19. const releaseChannel = isElectronRenderer ? window.DiscordNative.app.getReleaseChannel() : '';
  20. const useLegacyAudioDevice = appSettings ? appSettings.getSync('useLegacyAudioDevice') : false;
  21. const audioSubsystemSelected = appSettings
  22. ? appSettings.getSync('audioSubsystem') === 'legacy'
  23. ? 'legacy'
  24. : 'standard'
  25. : 'standard';
  26. const audioSubsystem = useLegacyAudioDevice || audioSubsystemSelected;
  27. const debugLogging = appSettings ? appSettings.getSync('debugLogging') : false;
// Parse voice-relevant CLI flags from the main process argv. exitProcess(false)
// keeps yargs from terminating the process on --help or parse errors.
const argv = yargs(mainArgv.slice(1))
  .describe('log-level', 'Logging level.')
  .default('log-level', -1)
  .help('h')
  .alias('h', 'help')
  .exitProcess(false).argv;
// -1 (the default) means "not specified": fall back to level 2 when the
// debugLogging setting is enabled, otherwise keep logging off (-1).
// NOTE(review): `==` is loose, so a string "-1" from the command line also
// matches — confirm that is intended before tightening to ===.
const logLevel = argv['log-level'] == -1 ? (debugLogging ? 2 : -1) : argv['log-level'];
  35. if (dataDirectory != null) {
  36. try {
  37. fs.mkdirSync(dataDirectory, {recursive: true});
  38. } catch (e) {
  39. console.warn("Couldn't create voice data directory ", dataDirectory, ':', e);
  40. }
  41. }
// Mirror renderer console output into the native voice engine's log when debug
// logging is enabled. The discordVoiceHooked flag guards against wrapping the
// same console methods twice if this module is evaluated again.
if (debugLogging && console.discordVoiceHooked == null) {
  console.discordVoiceHooked = true;
  for (const logFn of ['trace', 'debug', 'info', 'warn', 'error', 'log']) {
    const originalLogFn = console[logFn];
    if (originalLogFn != null) {
      // Deliberately a classic function: it must forward the caller's own
      // `this` and `arguments` to the original console method.
      console[logFn] = function () {
        originalLogFn.apply(this, arguments);
        try {
          // Arguments are stringified individually so non-serializable values
          // (cycles, DOM nodes) degrade to their toString() form.
          VoiceEngine.consoleLog(
            logFn,
            JSON.stringify(Array.from(arguments).map((v) => (v != null ? v.toString() : v)))
          );
        } catch (e) {
          // Drop errors from toString()/stringify.
        }
      };
    }
  }
}
  61. features.declareSupported('voice_panning');
  62. features.declareSupported('voice_multiple_connections');
  63. features.declareSupported('media_devices');
  64. features.declareSupported('media_video');
  65. features.declareSupported('debug_logging');
  66. features.declareSupported('set_audio_device_by_id');
  67. features.declareSupported('set_video_device_by_id');
  68. features.declareSupported('loopback');
  69. features.declareSupported('experiment_config');
  70. features.declareSupported('remote_locus_network_control');
  71. features.declareSupported('connection_replay');
  72. features.declareSupported('simulcast');
  73. features.declareSupported('direct_video');
  74. if (process.platform === 'win32') {
  75. features.declareSupported('voice_legacy_subsystem');
  76. features.declareSupported('soundshare');
  77. features.declareSupported('wumpus_video');
  78. features.declareSupported('hybrid_video');
  79. features.declareSupported('elevated_hook');
  80. features.declareSupported('soundshare_loopback');
  81. features.declareSupported('screen_previews');
  82. features.declareSupported('window_previews');
  83. features.declareSupported('audio_debug_state');
  84. features.declareSupported('video_effects');
  85. // NOTE(jvass): currently there's no experimental encoders! Add this back if you
  86. // add one and want to re-enable the UI for them.
  87. // features.declareSupported('experimental_encoders');
  88. }
  89. function bindConnectionInstance(instance) {
  90. return {
  91. destroy: () => instance.destroy(),
  92. setTransportOptions: (options) => instance.setTransportOptions(options),
  93. setSelfMute: (mute) => instance.setSelfMute(mute),
  94. setSelfDeafen: (deaf) => instance.setSelfDeafen(deaf),
  95. mergeUsers: (users) => instance.mergeUsers(users),
  96. destroyUser: (userId) => instance.destroyUser(userId),
  97. setLocalVolume: (userId, volume) => instance.setLocalVolume(userId, volume),
  98. setLocalMute: (userId, mute) => instance.setLocalMute(userId, mute),
  99. setLocalPan: (userId, left, right) => instance.setLocalPan(userId, left, right),
  100. setDisableLocalVideo: (userId, disabled) => instance.setDisableLocalVideo(userId, disabled),
  101. setMinimumOutputDelay: (delay) => instance.setMinimumOutputDelay(delay),
  102. getEncryptionModes: (callback) => instance.getEncryptionModes(callback),
  103. configureConnectionRetries: (baseDelay, maxDelay, maxAttempts) =>
  104. instance.configureConnectionRetries(baseDelay, maxDelay, maxAttempts),
  105. setOnSpeakingCallback: (callback) => instance.setOnSpeakingCallback(callback),
  106. setOnSpeakingWhileMutedCallback: (callback) => instance.setOnSpeakingWhileMutedCallback(callback),
  107. setPingInterval: (interval) => instance.setPingInterval(interval),
  108. setPingCallback: (callback) => instance.setPingCallback(callback),
  109. setPingTimeoutCallback: (callback) => instance.setPingTimeoutCallback(callback),
  110. setRemoteUserSpeakingStatus: (userId, speaking) => instance.setRemoteUserSpeakingStatus(userId, speaking),
  111. setRemoteUserCanHavePriority: (userId, canHavePriority) =>
  112. instance.setRemoteUserCanHavePriority(userId, canHavePriority),
  113. setOnVideoCallback: (callback) => instance.setOnVideoCallback(callback),
  114. setVideoBroadcast: (broadcasting) => instance.setVideoBroadcast(broadcasting),
  115. setDesktopSource: (id, videoHook, type) => instance.setDesktopSource(id, videoHook, type),
  116. setDesktopSourceStatusCallback: (callback) => instance.setDesktopSourceStatusCallback(callback),
  117. setOnDesktopSourceEnded: (callback) => instance.setOnDesktopSourceEnded(callback),
  118. setOnSoundshare: (callback) => instance.setOnSoundshare(callback),
  119. setOnSoundshareEnded: (callback) => instance.setOnSoundshareEnded(callback),
  120. setOnSoundshareFailed: (callback) => instance.setOnSoundshareFailed(callback),
  121. setPTTActive: (active, priority) => instance.setPTTActive(active, priority),
  122. getStats: (callback) => instance.getStats(callback),
  123. getFilteredStats: (filter, callback) => instance.getFilteredStats(filter, callback),
  124. startReplay: () => instance.startReplay(),
  125. };
  126. }
// Expose the transport factory under its public name.
VoiceEngine.createTransport = VoiceEngine._createTransport;
if (isElectronRenderer) {
  // The native engine allocates frame buffers through DOM ImageData so frames
  // can be handed to canvases without an extra copy.
  VoiceEngine.setImageDataAllocator((width, height) => new window.ImageData(width, height));
}
  131. VoiceEngine.createVoiceConnection = function (audioSSRC, userId, address, port, onConnectCallback, experiments, rids) {
  132. let instance = null;
  133. if (rids != null) {
  134. instance = new VoiceEngine.VoiceConnection(audioSSRC, userId, address, port, onConnectCallback, experiments, rids);
  135. } else if (experiments != null) {
  136. instance = new VoiceEngine.VoiceConnection(audioSSRC, userId, address, port, onConnectCallback, experiments);
  137. } else {
  138. instance = new VoiceEngine.VoiceConnection(audioSSRC, userId, address, port, onConnectCallback);
  139. }
  140. return bindConnectionInstance(instance);
  141. };
  142. VoiceEngine.createOwnStreamConnection = VoiceEngine.createVoiceConnection;
  143. VoiceEngine.createReplayConnection = function (audioEngineId, callback, replayLog) {
  144. if (replayLog == null) {
  145. return null;
  146. }
  147. return bindConnectionInstance(new VoiceEngine.VoiceReplayConnection(replayLog, audioEngineId, callback));
  148. };
  149. VoiceEngine.setAudioSubsystem = function (subsystem) {
  150. if (appSettings == null) {
  151. console.warn('Unable to access app settings.');
  152. return;
  153. }
  154. // TODO: With experiment controlling ADM selection, this may be incorrect since
  155. // audioSubsystem is read from settings (or default if does not exists)
  156. // and not the actual ADM used.
  157. if (subsystem === audioSubsystem) {
  158. return;
  159. }
  160. appSettings.set('audioSubsystem', subsystem);
  161. appSettings.set('useLegacyAudioDevice', false);
  162. if (isElectronRenderer) {
  163. window.DiscordNative.app.relaunch();
  164. }
  165. };
  166. VoiceEngine.setDebugLogging = function (enable) {
  167. if (appSettings == null) {
  168. console.warn('Unable to access app settings.');
  169. return;
  170. }
  171. if (debugLogging === enable) {
  172. return;
  173. }
  174. appSettings.set('debugLogging', enable);
  175. if (isElectronRenderer) {
  176. window.DiscordNative.app.relaunch();
  177. }
  178. };
  179. VoiceEngine.getDebugLogging = function () {
  180. return debugLogging;
  181. };
  182. const videoStreams = {};
  183. const ensureCanvasContext = function (sinkId) {
  184. let canvas = document.getElementById(sinkId);
  185. if (canvas == null) {
  186. for (const popout of window.popouts.values()) {
  187. const element = popout.document != null && popout.document.getElementById(sinkId);
  188. if (element != null) {
  189. canvas = element;
  190. break;
  191. }
  192. }
  193. if (canvas == null) {
  194. return null;
  195. }
  196. }
  197. const context = canvas.getContext('2d');
  198. if (context == null) {
  199. console.log(`Failed to initialize context for sinkId ${sinkId}`);
  200. return null;
  201. }
  202. return context;
  203. };
// [adill] NB: with context isolation it has become extremely costly (both memory & performance) to provide the image
// data directly to clients at any reasonably fast interval so we've replaced setVideoOutputSink with a direct canvas
// renderer via addVideoOutputSink
// Capture the native entry points, then delete them from the public surface so
// external callers must go through addVideoOutputSink/removeVideoOutputSink.
const setVideoOutputSink = VoiceEngine.setVideoOutputSink;
const clearVideoOutputSink = (streamId) => {
  // [adill] NB: if you don't pass a frame callback setVideoOutputSink clears the sink
  setVideoOutputSink(streamId);
};
const signalVideoOutputSinkReady = VoiceEngine.signalVideoOutputSinkReady;
delete VoiceEngine.setVideoOutputSink;
delete VoiceEngine.signalVideoOutputSinkReady;
  215. function addVideoOutputSinkInternal(sinkId, streamId, frameCallback) {
  216. let sinks = videoStreams[streamId];
  217. if (sinks == null) {
  218. sinks = videoStreams[streamId] = new Map();
  219. }
  220. if (sinks.size === 0) {
  221. console.log(`Subscribing to frames for streamId ${streamId}`);
  222. const onFrame = (imageData) => {
  223. const sinks = videoStreams[streamId];
  224. if (sinks != null) {
  225. for (const callback of sinks.values()) {
  226. if (callback != null) {
  227. callback(imageData);
  228. }
  229. }
  230. }
  231. signalVideoOutputSinkReady(streamId);
  232. };
  233. setVideoOutputSink(streamId, onFrame, true);
  234. }
  235. sinks.set(sinkId, frameCallback);
  236. }
  237. VoiceEngine.addVideoOutputSink = function (sinkId, streamId, frameCallback) {
  238. let canvasContext = null;
  239. addVideoOutputSinkInternal(sinkId, streamId, (imageData) => {
  240. if (canvasContext == null) {
  241. canvasContext = ensureCanvasContext(sinkId);
  242. if (canvasContext == null) {
  243. return;
  244. }
  245. }
  246. if (frameCallback != null) {
  247. frameCallback(imageData.width, imageData.height);
  248. }
  249. // [adill] NB: Electron 9+ on macOS would show massive leaks in the the GPU helper process when a non-Discord
  250. // window completely occludes the Discord window. Adding this tiny readback ameliorates the issue. We tried WebGL
  251. // rendering which did not exhibit the issue, however, the context limit of 16 was too small to be a real
  252. // alternative.
  253. const leak = canvasContext.getImageData(0, 0, 1, 1);
  254. canvasContext.putImageData(imageData, 0, 0);
  255. });
  256. };
  257. VoiceEngine.removeVideoOutputSink = function (sinkId, streamId) {
  258. const sinks = videoStreams[streamId];
  259. if (sinks != null) {
  260. sinks.delete(sinkId);
  261. if (sinks.size === 0) {
  262. delete videoStreams[streamId];
  263. console.log(`Unsubscribing from frames for streamId ${streamId}`);
  264. clearVideoOutputSink(streamId);
  265. }
  266. }
  267. };
  268. let sinkId = 0;
  269. VoiceEngine.getNextVideoOutputFrame = function (streamId) {
  270. const nextVideoFrameSinkId = `getNextVideoFrame_${++sinkId}`;
  271. return new Promise((resolve, reject) => {
  272. setTimeout(() => {
  273. VoiceEngine.removeVideoOutputSink(nextVideoFrameSinkId, streamId);
  274. reject(new Error('getNextVideoOutputFrame timeout'));
  275. }, 5000);
  276. addVideoOutputSinkInternal(nextVideoFrameSinkId, streamId, (imageData) => {
  277. VoiceEngine.removeVideoOutputSink(nextVideoFrameSinkId, streamId);
  278. resolve({
  279. width: imageData.width,
  280. height: imageData.height,
  281. data: new Uint8ClampedArray(imageData.data.buffer),
  282. });
  283. });
  284. });
  285. };
console.log(`Initializing voice engine with audio subsystem: ${audioSubsystem}`);
// Boot the native engine with the settings resolved above; the exported
// object is the fully-initialized voice engine API.
VoiceEngine.initialize({audioSubsystem, logLevel, dataDirectory});
module.exports = VoiceEngine;