# main.py
  1. print('Initializing... Dependencies')
  2. from Conversation.conversation import character_msg_constructor
  3. from vtube_studio import Char_control
  4. import romajitable # temporary use this since It'll blow up our ram if we use Machine Translation Model
  5. import pyaudio
  6. import soundfile as sf
  7. import scipy.io.wavfile as wavfile
  8. import requests
  9. import random
  10. import os
  11. import logging
  12. logging.getLogger("requests").setLevel(logging.WARNING) # make requests logging only important stuff
  13. logging.getLogger("urllib3").setLevel(logging.WARNING) # make requests logging only important stuff
  14. talk = character_msg_constructor("Lilia", None) # initialize character_msg_constructor
  15. # ----------- Waifu Vocal Pipeline -----------------------
  16. from AIVoifu.client_pipeline import tts_pipeline
  17. vocal_pipeline = tts_pipeline()
  18. # initialize Vstudio Waifu Controller
  19. print('Initializing... Vtube Studio')
  20. waifu = Char_control(port=8001, plugin_name='MyBitchIsAI', plugin_developer='HRNPH')
  21. print('Initialized')
  22. # chat api
  23. def chat(msg, reset=False):
  24. command = 'chat'
  25. if reset:
  26. command = 'reset'
  27. params = {
  28. 'command': f'{command}',
  29. 'data': msg,
  30. }
  31. try:
  32. r = requests.get('http://localhost:8267/waifuapi', params=params)
  33. except requests.exceptions.ConnectionError as e:
  34. print('--------- Exception Occured ---------')
  35. print('if you have run the server on different device, please specify the ip address of the server with the port')
  36. print('Example: http://192.168.1.112:8267 or leave it blank to use localhost')
  37. print('***please specify the ip address of the server with the port*** at:')
  38. print(f'*Line {e.__traceback__.tb_lineno}: {e}')
  39. print('-------------------------------------')
  40. exit()
  41. return r.text
  42. split_counter = 0
  43. history = ''
  44. while True:
  45. con = str(input("You: "))
  46. if con.lower() == 'exit':
  47. print('Stopping...')
  48. break # exit prototype
  49. if con.lower() == 'reset':
  50. print('Resetting...')
  51. print(chat('None', reset=True))
  52. continue # reset story skip to next loop
  53. # ----------- Create Response --------------------------
  54. emo_answer = chat(con).replace("\"","") # send message to api
  55. emo, answer = emo_answer.split("<split_token>")
  56. print("**"+emo)
  57. if len(answer) > 2:
  58. use_answer = answer
  59. # ------------------------------------------------------
  60. print(f'Answer: {answer}')
  61. if answer.strip().endswith(f'{talk.name}:') or answer.strip() == '':
  62. continue # skip audio processing if the answer is just the name (no talking)
  63. # ----------- Waifu Create Talking Audio -----------------------
  64. vocal_pipeline.tts(use_answer, save_path=f'./audio_cache/dialog_cache.wav', voice_conversion=True)
  65. # --------------------------------------------------
  66. # ----------- Waifu Talking -----------------------
  67. # play audio directly from cache
  68. p = pyaudio.PyAudio()
  69. data, samplerate = sf.read('./audio_cache/dialog_cache.wav', dtype='float32')
  70. stream = p.open(format=pyaudio.paFloat32,
  71. channels=1,
  72. rate=samplerate,
  73. output=True)
  74. stream.write(data.tobytes())
  75. stream.stop_stream()
  76. stream.close()
  77. # --------------------------------------------------
  78. if emo: ## express emotion
  79. waifu.express(emo) # express emotion in Vtube Studio
  80. # --------------------------------------------------