loganalysis.py

import sys
import json

import requests

# prelines and postlines set how many lines of context to include
# in the output around each line that contains an error.
prelines = 10
postlines = 10


def find_errors_in_log_file():
    """Collect every error line from the log, plus its surrounding context."""
    if len(sys.argv) < 2:
        print("Usage: python loganalysis.py <filename>")
        sys.exit(1)

    log_file_path = sys.argv[1]
    with open(log_file_path, 'r') as log_file:
        log_lines = log_file.readlines()

    error_logs = []
    for i, line in enumerate(log_lines):
        if "error" in line.lower():
            # Keep the error line along with up to `prelines` lines before it
            # and `postlines` lines after it, clamped to the bounds of the file.
            start_index = max(0, i - prelines)
            end_index = min(len(log_lines), i + postlines + 1)
            error_logs.extend(log_lines[start_index:end_index])

    return error_logs


error_logs = find_errors_in_log_file()

# Send the collected error context to a local Ollama server for analysis.
data = {
    "prompt": "\n".join(error_logs),
    "model": "mattw/loganalyzer"
}

response = requests.post("http://localhost:11434/api/generate", json=data, stream=True)

# The generate endpoint streams one JSON object per line; print each partial
# response as it arrives and stop once the final "done" message is received.
for line in response.iter_lines():
    if line:
        json_data = json.loads(line)
        if not json_data['done']:
            print(json_data['response'], end='', flush=True)