# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import re
import sys
import time

from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.config.contributionareas import ContributionAreas
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.tool import steps
from webkitpy.tool.multicommandtool import Command
  38. class AnalyzeChangeLog(Command):
  39. name = "analyze-changelog"
  40. help_text = "Experimental command for analyzing change logs."
  41. long_help = "This command parses changelogs in a specified directory and summarizes the result as JSON files."
  42. def __init__(self):
  43. options = [
  44. steps.Options.changelog_count,
  45. ]
  46. Command.__init__(self, options=options)
  47. @staticmethod
  48. def _enumerate_changelogs(filesystem, dirname, changelog_count):
  49. changelogs = [filesystem.join(dirname, filename) for filename in filesystem.listdir(dirname) if re.match('^ChangeLog(-(\d{4}-\d{2}-\d{2}))?$', filename)]
  50. # Make sure ChangeLog shows up before ChangeLog-2011-01-01
  51. changelogs = sorted(changelogs, key=lambda filename: filename + 'X', reverse=True)
  52. return changelogs[:changelog_count]
  53. @staticmethod
  54. def _generate_jsons(filesystem, jsons, output_dir):
  55. for filename in jsons:
  56. print ' Generating', filename
  57. filesystem.write_text_file(filesystem.join(output_dir, filename), json.dumps(jsons[filename], indent=2))
  58. def execute(self, options, args, tool):
  59. filesystem = self._tool.filesystem
  60. if len(args) < 1 or not filesystem.exists(args[0]):
  61. print "Need the directory name to look for changelog as the first argument"
  62. return
  63. changelog_dir = filesystem.abspath(args[0])
  64. if len(args) < 2 or not filesystem.exists(args[1]):
  65. print "Need the output directory name as the second argument"
  66. return
  67. output_dir = args[1]
  68. startTime = time.time()
  69. print 'Enumerating ChangeLog files...'
  70. changelogs = AnalyzeChangeLog._enumerate_changelogs(filesystem, changelog_dir, options.changelog_count)
  71. analyzer = ChangeLogAnalyzer(tool, changelogs)
  72. analyzer.analyze()
  73. print 'Generating json files...'
  74. json_files = {
  75. 'summary.json': analyzer.summary(),
  76. 'contributors.json': analyzer.contributors_statistics(),
  77. 'areas.json': analyzer.areas_statistics(),
  78. }
  79. AnalyzeChangeLog._generate_jsons(filesystem, json_files, output_dir)
  80. commands_dir = filesystem.dirname(filesystem.path_to_module(self.__module__))
  81. print commands_dir
  82. filesystem.copyfile(filesystem.join(commands_dir, 'data/summary.html'), filesystem.join(output_dir, 'summary.html'))
  83. tick = time.time() - startTime
  84. print 'Finished in %02dm:%02ds' % (int(tick / 60), int(tick % 60))
  85. class ChangeLogAnalyzer(object):
  86. def __init__(self, host, changelog_paths):
  87. self._changelog_paths = changelog_paths
  88. self._filesystem = host.filesystem
  89. self._contribution_areas = ContributionAreas(host.filesystem)
  90. self._scm = host.scm()
  91. self._parsed_revisions = {}
  92. self._contributors_statistics = {}
  93. self._areas_statistics = dict([(area, {'reviewed': 0, 'unreviewed': 0, 'contributors': {}}) for area in self._contribution_areas.names()])
  94. self._summary = {'reviewed': 0, 'unreviewed': 0}
  95. self._longest_filename = max([len(path) - len(self._scm.checkout_root) for path in changelog_paths])
  96. self._filename = ''
  97. self._length_of_previous_output = 0
  98. def contributors_statistics(self):
  99. return self._contributors_statistics
  100. def areas_statistics(self):
  101. return self._areas_statistics
  102. def summary(self):
  103. return self._summary
  104. def _print_status(self, status):
  105. if self._length_of_previous_output:
  106. print "\r" + " " * self._length_of_previous_output,
  107. new_output = ('%' + str(self._longest_filename) + 's: %s') % (self._filename, status)
  108. print "\r" + new_output,
  109. self._length_of_previous_output = len(new_output)
  110. def _set_filename(self, filename):
  111. if self._filename:
  112. print
  113. self._filename = filename
  114. def analyze(self):
  115. for path in self._changelog_paths:
  116. self._set_filename(self._filesystem.relpath(path, self._scm.checkout_root))
  117. with self._filesystem.open_text_file_for_reading(path) as changelog:
  118. self._print_status('Parsing entries...')
  119. number_of_parsed_entries = self._analyze_entries(ChangeLog.parse_entries_from_file(changelog), path)
  120. self._print_status('Done (%d entries)' % number_of_parsed_entries)
  121. print
  122. self._summary['contributors'] = len(self._contributors_statistics)
  123. self._summary['contributors_with_reviews'] = sum([1 for contributor in self._contributors_statistics.values() if contributor['reviews']['total']])
  124. self._summary['contributors_without_reviews'] = self._summary['contributors'] - self._summary['contributors_with_reviews']
  125. def _collect_statistics_for_contributor_area(self, area, contributor, contribution_type, reviewed):
  126. area_contributors = self._areas_statistics[area]['contributors']
  127. if contributor not in area_contributors:
  128. area_contributors[contributor] = {'reviews': 0, 'reviewed': 0, 'unreviewed': 0}
  129. if contribution_type == 'patches':
  130. contribution_type = 'reviewed' if reviewed else 'unreviewed'
  131. area_contributors[contributor][contribution_type] += 1
  132. def _collect_statistics_for_contributor(self, contributor, contribution_type, areas, touched_files, reviewed):
  133. if contributor not in self._contributors_statistics:
  134. self._contributors_statistics[contributor] = {
  135. 'reviews': {'total': 0, 'areas': {}, 'files': {}},
  136. 'patches': {'reviewed': 0, 'unreviewed': 0, 'areas': {}, 'files': {}}}
  137. statistics = self._contributors_statistics[contributor][contribution_type]
  138. if contribution_type == 'reviews':
  139. statistics['total'] += 1
  140. elif reviewed:
  141. statistics['reviewed'] += 1
  142. else:
  143. statistics['unreviewed'] += 1
  144. for area in areas:
  145. self._increment_dictionary_value(statistics['areas'], area)
  146. self._collect_statistics_for_contributor_area(area, contributor, contribution_type, reviewed)
  147. for touchedfile in touched_files:
  148. self._increment_dictionary_value(statistics['files'], touchedfile)
  149. def _increment_dictionary_value(self, dictionary, key):
  150. dictionary[key] = dictionary.get(key, 0) + 1
  151. def _analyze_entries(self, entries, changelog_path):
  152. dirname = self._filesystem.dirname(changelog_path)
  153. i = 0
  154. for i, entry in enumerate(entries):
  155. self._print_status('(%s) entries' % i)
  156. assert(entry.authors())
  157. touchedfiles_for_entry = [self._filesystem.relpath(self._filesystem.join(dirname, name), self._scm.checkout_root) for name in entry.touched_files()]
  158. areas_for_entry = self._contribution_areas.areas_for_touched_files(touchedfiles_for_entry)
  159. authors_for_entry = entry.authors()
  160. reviewers_for_entry = entry.reviewers()
  161. for reviewer in reviewers_for_entry:
  162. self._collect_statistics_for_contributor(reviewer.full_name, 'reviews', areas_for_entry, touchedfiles_for_entry, reviewed=True)
  163. for author in authors_for_entry:
  164. self._collect_statistics_for_contributor(author['name'], 'patches', areas_for_entry, touchedfiles_for_entry,
  165. reviewed=bool(reviewers_for_entry))
  166. for area in areas_for_entry:
  167. self._areas_statistics[area]['reviewed' if reviewers_for_entry else 'unreviewed'] += 1
  168. self._summary['reviewed' if reviewers_for_entry else 'unreviewed'] += 1
  169. self._print_status('(%s) entries' % i)
  170. return i