email_preprocess.py

#!/usr/bin/python3

import joblib
import numpy
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif


def preprocess(words_file="../tools/word_data.pkl", authors_file="../tools/email_authors.pkl"):
    """
    This function takes a pre-made list of email texts (by default word_data.pkl)
    and the corresponding authors (by default email_authors.pkl) and performs
    a number of preprocessing steps:
        -- splits into training/testing sets (10% testing)
        -- vectorizes into a tfidf matrix
        -- selects/keeps the most helpful features

    After this, the features and labels are put into numpy arrays, which play
    nice with sklearn functions.

    4 objects are returned:
        -- training/testing features
        -- training/testing labels
    """
    ### the words (features) and authors (labels), already largely preprocessed;
    ### this preprocessing will be repeated in the text learning mini-project
    with open(authors_file, "rb") as authors_file_handler:
        authors = joblib.load(authors_file_handler)
    with open(words_file, "rb") as words_file_handler:
        word_data = joblib.load(words_file_handler)

    ### test_size is the fraction of events assigned to the test set
    ### (the remainder go into training)
    features_train, features_test, labels_train, labels_test = train_test_split(
        word_data, authors, test_size=0.1, random_state=42)

    ### text vectorization--go from strings to lists of numbers
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
    features_train_transformed = vectorizer.fit_transform(features_train)
    features_test_transformed = vectorizer.transform(features_test)

    ### feature selection, because text is super high dimensional and
    ### can be really computationally chewy as a result
    selector = SelectPercentile(f_classif, percentile=10)
    selector.fit(features_train_transformed, labels_train)
    features_train_transformed = selector.transform(features_train_transformed).toarray()
    features_test_transformed = selector.transform(features_test_transformed).toarray()

    ### info on the data
    print("No. of Chris training emails:", sum(labels_train))
    print("No. of Sara training emails:", len(labels_train) - sum(labels_train))

    return features_train_transformed, features_test_transformed, labels_train, labels_test
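

### Minimal usage sketch (not part of the original file): it assumes the default
### pickle files exist at ../tools/ relative to the working directory, as in the
### course mini-projects. It simply calls preprocess() and reports the shapes of
### the resulting feature matrices.
if __name__ == "__main__":
    features_train, features_test, labels_train, labels_test = preprocess()
    print("Training feature matrix shape:", features_train.shape)
    print("Testing feature matrix shape:", features_test.shape)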