base_test_impact.py 29 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622
  1. #
  2. # Copyright (c) Contributors to the Open 3D Engine Project.
  3. # For complete copyright and license terms please see the LICENSE at the root of this distribution.
  4. #
  5. # SPDX-License-Identifier: Apache-2.0 OR MIT
  6. #
  7. #
  8. from abc import ABC, abstractmethod
  9. from pathlib import PurePath, Path
  10. import json
  11. import subprocess
  12. import re
  13. import uuid
  14. from test_impact import RuntimeArgs
  15. from git_utils import Repo
  16. from persistent_storage import PersistentStorageLocal, PersistentStorageS3
  17. from tiaf_tools import get_logger
  18. import tiaf_report_constants as constants
  19. logger = get_logger(__file__)
# Constants to access our argument dictionary for the values of different arguments. Not guaranteed to be in the dictionary in all cases.
  21. ARG_S3_BUCKET = 's3_bucket'
  22. ARG_SUITES = 'suites'
  23. ARG_LABEL_EXCLUDES = 'label_excludes'
  24. ARG_CONFIG = 'config'
  25. ARG_SOURCE_BRANCH = 'src_branch'
  26. ARG_DESTINATION_BRANCH = 'dst_branch'
  27. ARG_COMMIT = 'commit'
  28. ARG_S3_TOP_LEVEL_DIR = 's3_top_level_dir'
  29. ARG_SEQUENCE_OVERRIDE = 'sequence_override'
  30. ARG_INTEGRATION_POLICY = RuntimeArgs.COMMON_IPOLICY.driver_argument
  31. ARG_TEST_FAILURE_POLICY = RuntimeArgs.COMMON_FPOLICY.driver_argument
  32. ARG_CHANGE_LIST = RuntimeArgs.COMMON_CHANGELIST.driver_argument
  33. ARG_SEQUENCE = RuntimeArgs.COMMON_SEQUENCE.driver_argument
  34. ARG_REPORT = RuntimeArgs.COMMON_REPORT.driver_argument
  35. # Sequence types as constants
  36. TIA_NOWRITE = 'tianowrite'
  37. TIA_SEED = 'seed'
  38. TIA_ON = 'tia'
  39. TIA_REGULAR = 'regular'
class BaseTestImpact(ABC):

    # Runtime type identifier; None in the abstract base. Subclasses are
    # expected to provide the concrete value surfaced by the abstract
    # `runtime_type` property (current options are "native" or "python").
    _runtime_type = None
    def __init__(self, args: dict):
        """
        Initializes the test impact model with the commit, branches as runtime configuration.
        @param args: The arguments to be parsed and applied to this TestImpact object.
        """
        self._runtime_args = []
        # Change list buckets consumed by the TIAF runtime; populated later by
        # _attempt_to_generate_change_list().
        self._change_list = {"createdFiles": [],
                             "updatedFiles": [], "deletedFiles": []}
        self._has_change_list = False
        self._enabled = False
        self._use_test_impact_analysis = False
        # Unique instance id to be used as part of the report name.
        self._instance_id = uuid.uuid4().hex
        self._s3_bucket = args.get(ARG_S3_BUCKET)
        self._suites = args.get(ARG_SUITES)
        self._label_excludes = args.get(ARG_LABEL_EXCLUDES)
        # Compile the dash-separated concatenation of the ordered suites and labels to be used as path components
        self._suites_string = '-'.join(self._suites) if isinstance(self._suites, list) else self._suites
        self._label_excludes_string = '-'.join(self._label_excludes) if isinstance(self._label_excludes, list) else self._label_excludes
        # NOTE: _parse_config_file also sets _enabled, _use_test_impact_analysis,
        # the workspace paths and the runtime binary path as side effects.
        self._config = self._parse_config_file(args.get(ARG_CONFIG))
        if not self._enabled:
            logger.info(f"TIAF is disabled.")
            return
        # Initialize branches
        self._src_branch = args.get(ARG_SOURCE_BRANCH)
        self._dst_branch = args.get(ARG_DESTINATION_BRANCH)
        logger.info(f"Source branch: '{self._src_branch}'.")
        logger.info(f"Destination branch: '{self._dst_branch}'.")
        # Determine our source of truth. Also initializes our source of truth property.
        self._determine_source_of_truth()
        # Initialize commit info
        self._dst_commit = args.get(ARG_COMMIT)
        logger.info(f"Commit: '{self._dst_commit}'.")
        self._src_commit = None
        self._commit_distance = None
        sequence_type = self._default_sequence_type
        # If flag is set for us to use TIAF
        if self._use_test_impact_analysis:
            logger.info("Test impact analysis is enabled.")
            self._persistent_storage = self._initialize_persistent_storage(s3_top_level_dir=args.get(ARG_S3_TOP_LEVEL_DIR))
            # If persistent storage initialized correctly
            if self._persistent_storage:
                # Historic Data Handling:
                # This flag is used to help handle our corner cases if we have historic data.
                # NOTE: need to draft in failing tests or only update upon success otherwise reruns for failed runs will have the same last commit
                # hash as the commit and generate an empty changelist
                self._can_rerun_with_instrumentation = True
                if self._persistent_storage.has_historic_data:
                    logger.info("Historic data found.")
                    self._handle_historic_data()
                else:
                    logger.info("No historic data found.")
                # Determining our sequence type:
                if self._has_change_list:
                    if self._is_source_of_truth_branch:
                        # Use TIA sequence (instrumented subset of tests) for coverage updating branches so we can update the coverage data with the generated coverage
                        sequence_type = TIA_ON
                    else:
                        # Use TIA no-write sequence (regular subset of tests) for non coverage updating branches
                        sequence_type = TIA_NOWRITE
                        # Ignore integrity failures for non coverage updating branches as our confidence in the
                        # stored coverage data is not affected by them (NOTE(review): original comment was
                        # truncated here — confirm the rationale)
                        args[ARG_INTEGRATION_POLICY] = "continue"
                    args[ARG_CHANGE_LIST] = self._change_list_path
                else:
                    if self._is_source_of_truth_branch and self._can_rerun_with_instrumentation:
                        # Use seed sequence (instrumented all tests) for coverage updating branches so we can generate the coverage bed for future sequences
                        sequence_type = TIA_SEED
                        # We always continue after test failures when seeding to ensure we capture the coverage for all test targets
                        args[ARG_TEST_FAILURE_POLICY] = "continue"
                    else:
                        # Use regular sequence (regular all tests) for non coverage updating branches as we have no coverage to use nor coverage to update
                        sequence_type = TIA_REGULAR
                        # Ignore integrity failures for non coverage updating branches as our confidence in the
                        # stored coverage data is not affected by them (NOTE(review): original comment was
                        # truncated here — confirm the rationale)
                        args[ARG_INTEGRATION_POLICY] = "continue"
        # Store sequence and report into args so that our argument enum can be used to apply all relevant arguments.
        args[ARG_SEQUENCE] = args.get(ARG_SEQUENCE_OVERRIDE) or sequence_type
        self._report_file = PurePath(self._report_workspace).joinpath(
            f"report.{self._instance_id}.json")
        args[ARG_REPORT] = self._report_file
        self._parse_arguments_to_runtime(args)
  122. def _parse_arguments_to_runtime(self, args):
  123. """
  124. Fetches the relevant keys from the provided dictionary, and applies the values of the arguments(or applies them as a flag) to our runtime_args list.
  125. @param args: Dictionary containing the arguments passed to this TestImpact object. Will contain all the runtime arguments we need to apply.
  126. """
  127. for argument in RuntimeArgs:
  128. value = args.get(argument.driver_argument)
  129. if value:
  130. self._runtime_args.append(f"{argument.runtime_arg}{','.join(value) if isinstance(value, list) else value}")
  131. logger.info(f"{argument.message}{value}")
  132. def _handle_historic_data(self):
  133. """
  134. This method handles the different cases of when we have historic data, and carries out the desired action.
  135. Case 1:
  136. This commit is different to the last commit in our historic data. Action: Generate change-list.
  137. Case 2:
  138. This commit has already been run in TIAF, and we have useful historic data. Action: Use that data for our TIAF run.
  139. Case 3:
  140. This commit has already been run in TIAF, but we have no useful historic data for it. Action: A regular sequence is performed instead. Persistent storage is set to none and rerun_with_instrumentation is set to false.
  141. """
  142. # src commit is set to the commit hash of the last commit we have historic data for
  143. self._src_commit = self._persistent_storage.last_commit_hash
  144. # Check to see if this is a re-run for this commit before any other changes have come in
  145. # If the last commit hash in our historic data is the same as our current commit hash
  146. if self._persistent_storage.is_last_commit_hash_equal_to_this_commit_hash:
  147. # If we have the last commit hash of our previous run in our json then we will just use the data from that run
  148. if self._persistent_storage.has_previous_last_commit_hash:
  149. logger.info(
  150. f"This sequence is being re-run before any other changes have come in so the last commit '{self._persistent_storage.this_commit_last_commit_hash}' used for the previous sequence will be used instead.")
  151. self._src_commit = self._persistent_storage.this_commit_last_commit_hash
  152. else:
  153. # If we don't have the last commit hash of our previous run then we do a regular run as there will be no change list and no historic coverage data to use
  154. logger.info(
  155. f"This sequence is being re-run before any other changes have come in but there is no useful historic data. A regular sequence will be performed instead.")
  156. self._persistent_storage = None
  157. self._can_rerun_with_instrumentation = False
  158. else:
  159. # If this commit is different to the last commit in our historic data, we can diff the commits to get our change list
  160. self._attempt_to_generate_change_list()
  161. def _initialize_persistent_storage(self, s3_top_level_dir: str = None):
  162. """
  163. Initialise our persistent storage object. Defaults to initialising local storage, unless the s3_bucket argument is not None.
  164. Returns PersistentStorage object or None if initialisation failed.
  165. @param s3_top_level_dir: The name of the top level directory to use in the s3 bucket.
  166. @returns: Returns a persistent storage object, or None if a SystemError exception occurs while initialising the object.
  167. """
  168. try:
  169. if self._s3_bucket:
  170. return PersistentStorageS3(
  171. self._config, self._suites_string, self._dst_commit, self._s3_bucket, self._compile_s3_top_level_dir_name(s3_top_level_dir), self._source_of_truth_branch, self._active_workspace, self._unpacked_coverage_data_file, self._previous_test_run_data_file, self._temp_workspace)
  172. else:
  173. return PersistentStorageLocal(
  174. self._config, self._suites_string, self._dst_commit, self._active_workspace, self._unpacked_coverage_data_file, self._previous_test_run_data_file, self._historic_workspace, self._historic_data_file, self._temp_workspace)
  175. except SystemError as e:
  176. logger.warning(
  177. f"The persistent storage encountered an irrecoverable error, test impact analysis will be disabled: '{e}'")
  178. return None
  179. def _determine_source_of_truth(self):
  180. """
  181. Determines whether the branch we are executing TIAF on is the source of truth (the branch from which the coverage data will be stored/retrieved from) or not.
  182. """
  183. # Source of truth (the branch from which the coverage data will be stored/retrieved from)
  184. if not self._dst_branch or self._src_branch == self._dst_branch:
  185. # Branch builds are their own source of truth and will update the coverage data for the source of truth after any instrumented sequences complete
  186. self._source_of_truth_branch = self._src_branch
  187. else:
  188. # Pull request builds use their destination as the source of truth and never update the coverage data for the source of truth
  189. self._source_of_truth_branch = self._dst_branch
  190. logger.info(
  191. f"Source of truth branch: '{self._source_of_truth_branch}'.")
  192. logger.info(
  193. f"Is source of truth branch: '{self._is_source_of_truth_branch}'.")
  194. def _parse_config_file(self, config_file: str):
  195. """
  196. Parse the configuration file and retrieve the data needed for launching the test impact analysis runtime.
  197. @param config_file: The runtime config file to obtain the runtime configuration data from.
  198. """
  199. COMMON_CONFIG_KEY = "common"
  200. WORKSPACE_KEY = "workspace"
  201. HISTORIC_SEQUENCES_KEY = "historic_sequences"
  202. ACTIVE_KEY = "active"
  203. ROOT_KEY = "root"
  204. TEMP_KEY = "temp"
  205. REPO_KEY = "repo"
  206. HISTORIC_KEY = "historic"
  207. RELATIVE_PATHS_KEY = "relative_paths"
  208. TEST_IMPACT_DATA_FILE_KEY = "test_impact_data_file"
  209. PREVIOUS_TEST_RUN_DATA_FILE_KEY = "previous_test_run_data_file"
  210. LAST_COMMIT_HASH_KEY = "last_commit_hash"
  211. COVERAGE_DATA_KEY = "coverage_data"
  212. PREVIOUS_TEST_RUNS_KEY = "previous_test_runs"
  213. HISTORIC_DATA_FILE_KEY = "data"
  214. JENKINS_KEY = "jenkins"
  215. ENABLED_KEY = "enabled"
  216. USE_TEST_IMPACT_ANALYSIS_KEY = "use_test_impact_analysis"
  217. RUNTIME_BIN_KEY = "runtime_bin"
  218. RUNTIME_ARTIFACT_DIR_KEY = "run_artifact_dir"
  219. RUNTIME_COVERAGE_DIR_KEY = "coverage_artifact_dir"
  220. REPORT_KEY = "reports"
  221. CHANGE_LIST_KEY = "change_list"
  222. logger.info(
  223. f"Attempting to parse configuration file '{config_file}'...")
  224. try:
  225. with open(config_file, "r") as config_data:
  226. config = json.load(config_data)
  227. self._repo_dir = config[COMMON_CONFIG_KEY][REPO_KEY][ROOT_KEY]
  228. self._repo = Repo(self._repo_dir)
  229. # TIAF
  230. self._enabled = config[self.runtime_type][JENKINS_KEY][ENABLED_KEY]
  231. self._use_test_impact_analysis = config[self.runtime_type][JENKINS_KEY][USE_TEST_IMPACT_ANALYSIS_KEY]
  232. self._tiaf_bin = Path(
  233. config[self.runtime_type][RUNTIME_BIN_KEY])
  234. if self._use_test_impact_analysis and not self._tiaf_bin.is_file():
  235. logger.warning(
  236. f"Could not find TIAF binary at location {self._tiaf_bin}, TIAF will be turned off.")
  237. self._use_test_impact_analysis = False
  238. else:
  239. logger.info(
  240. f"Runtime binary found at location '{self._tiaf_bin}'")
  241. # Workspaces
  242. self._active_workspace = config[self.runtime_type][WORKSPACE_KEY][ACTIVE_KEY][ROOT_KEY]
  243. self._historic_workspace = config[self.runtime_type][WORKSPACE_KEY][HISTORIC_KEY][ROOT_KEY]
  244. self._temp_workspace = config[self.runtime_type][WORKSPACE_KEY][TEMP_KEY][ROOT_KEY]
  245. self._report_workspace = config[self.runtime_type][WORKSPACE_KEY][TEMP_KEY][REPORT_KEY]
  246. self._change_list_workspace = config[self.runtime_type][WORKSPACE_KEY][TEMP_KEY][CHANGE_LIST_KEY]
  247. # Data file paths
  248. self._unpacked_coverage_data_file = config[self.runtime_type][
  249. WORKSPACE_KEY][ACTIVE_KEY][RELATIVE_PATHS_KEY][TEST_IMPACT_DATA_FILE_KEY]
  250. self._previous_test_run_data_file = config[self.runtime_type][WORKSPACE_KEY][
  251. ACTIVE_KEY][RELATIVE_PATHS_KEY][PREVIOUS_TEST_RUN_DATA_FILE_KEY]
  252. self._historic_data_file = config[self.runtime_type][WORKSPACE_KEY][
  253. HISTORIC_KEY][RELATIVE_PATHS_KEY][HISTORIC_DATA_FILE_KEY]
  254. # Runtime artifact and coverage directories
  255. self._runtime_artifact_directory = config[self.runtime_type][WORKSPACE_KEY][TEMP_KEY][RUNTIME_ARTIFACT_DIR_KEY]
  256. self._runtime_coverage_directory = config[self.runtime_type][WORKSPACE_KEY][TEMP_KEY][RUNTIME_COVERAGE_DIR_KEY]
  257. logger.info("The configuration file was parsed successfully.")
  258. return config
  259. except KeyError as e:
  260. logger.error(f"The config does not contain the key {str(e)}.")
  261. return None
  262. except json.JSONDecodeError as e:
  263. logger.error("The config file doesn not contain valid JSON")
  264. raise SystemError(
  265. "Config file does not contain valid JSON, stopping TIAF")
  266. def _attempt_to_generate_change_list(self):
  267. """
  268. Attempts to determine the change list between now and the last tiaf run (if any).
  269. """
  270. self._has_change_list = False
  271. self._change_list_path = None
  272. # Check whether or not a previous commit hash exists (no hash is not a failure)
  273. if self._src_commit:
  274. if self._is_source_of_truth_branch:
  275. # For branch builds, the dst commit must be descended from the src commit
  276. if not self._repo.is_descendent(self._src_commit, self._dst_commit):
  277. logger.error(
  278. f"Source commit '{self._src_commit}' and destination commit '{self._dst_commit}' must be related for branch builds.")
  279. return
  280. # Calculate the distance (in commits) between the src and dst commits
  281. self._commit_distance = self._repo.commit_distance(
  282. self._src_commit, self._dst_commit)
  283. logger.info(
  284. f"The distance between '{self._src_commit}' and '{self._dst_commit}' commits is '{self._commit_distance}' commits.")
  285. multi_branch = False
  286. else:
  287. # For pull request builds, the src and dst commits are on different branches so we need to ensure a common ancestor is used for the diff
  288. multi_branch = True
  289. try:
  290. # Attempt to generate a diff between the src and dst commits
  291. logger.info(
  292. f"Source '{self._src_commit}' and destination '{self._dst_commit}' will be diff'd.")
  293. diff_path = Path(PurePath(self._change_list_workspace).joinpath(
  294. f"changelist.{self._instance_id}.diff"))
  295. self._repo.create_diff_file(
  296. self._src_commit, self._dst_commit, diff_path, multi_branch)
  297. except RuntimeError as e:
  298. logger.error(e)
  299. return
  300. # A diff was generated, attempt to parse the diff and construct the change list
  301. logger.info(
  302. f"Generated diff between commits '{self._src_commit}' and '{self._dst_commit}': '{diff_path}'.")
  303. with open(diff_path, "r") as diff_data:
  304. lines = diff_data.readlines()
  305. for line in lines:
  306. match = re.split("^R[0-9]+\\s(\\S+)\\s(\\S+)", line)
  307. if len(match) > 1:
  308. # File rename
  309. # Treat renames as a deletion and an addition
  310. self._change_list["deletedFiles"].append(match[1])
  311. self._change_list["createdFiles"].append(match[2])
  312. else:
  313. match = re.split("^[AMD]\\s(\\S+)", line)
  314. if len(match) > 1:
  315. if line[0] == 'A':
  316. # File addition
  317. self._change_list["createdFiles"].append(
  318. match[1])
  319. elif line[0] == 'M':
  320. # File modification
  321. self._change_list["updatedFiles"].append(
  322. match[1])
  323. elif line[0] == 'D':
  324. # File Deletion
  325. self._change_list["deletedFiles"].append(
  326. match[1])
  327. # Serialize the change list to the JSON format the test impact analysis runtime expects
  328. change_list_json = json.dumps(self._change_list, indent=4)
  329. change_list_path = PurePath(self._temp_workspace).joinpath(
  330. f"changelist.{self._instance_id}.json")
  331. f = open(change_list_path, "w")
  332. f.write(change_list_json)
  333. f.close()
  334. logger.info(
  335. f"Change list constructed successfully: '{change_list_path}'.")
  336. logger.info(
  337. f"{len(self._change_list['createdFiles'])} created files, {len(self._change_list['updatedFiles'])} updated files and {len(self._change_list['deletedFiles'])} deleted files.")
  338. # Note: an empty change list generated due to no changes between last and current commit is valid
  339. self._has_change_list = True
  340. self._change_list_path = change_list_path
  341. else:
  342. logger.error(
  343. "No previous commit hash found, regular or seeded sequences only will be run.")
  344. self._has_change_list = False
  345. return
  346. def _generate_result(self, return_code: int, report: dict):
  347. """
  348. Generates the result object from the pertinent runtime meta-data and sequence report.
  349. @param The generated result object.
  350. """
  351. result = {}
  352. result[constants.SRC_COMMIT_KEY] = self._src_commit
  353. result[constants.DST_COMMIT_KEY] = self._dst_commit
  354. result[constants.COMMIT_DISTANCE_KEY] = self._commit_distance
  355. result[constants.SRC_BRANCH_KEY] = self._src_branch
  356. result[constants.DST_BRANCH_KEY] = self._dst_branch
  357. result[constants.SUITES_KEY] = self._suites
  358. result[constants.LABEL_EXCLUDES_KEY] = self._label_excludes
  359. result[constants.USE_TEST_IMPACT_ANALYSIS_KEY] = self._use_test_impact_analysis
  360. result[constants.SOURCE_OF_TRUTH_BRANCH_KEY] = self._source_of_truth_branch
  361. result[constants.IS_SOURCE_OF_TRUTH_BRANCH_KEY] = self._is_source_of_truth_branch
  362. result[constants.HAS_CHANGE_LIST_KEY] = self._has_change_list
  363. result[constants.HAS_HISTORIC_DATA_KEY] = self._has_historic_data
  364. result[constants.S3_BUCKET_KEY] = self._s3_bucket
  365. result[constants.RUNTIME_ARGS_KEY] = self._runtime_args
  366. result[constants.RUNTIME_RETURN_CODE_KEY] = return_code
  367. result[constants.REPORT_KEY] = report
  368. result[constants.CHANGE_LIST_KEY] = self._change_list
  369. result[constants.RUNTIME_TYPE_KEY] = self.runtime_type
  370. mismatched_tests = self._cross_check_tests(report)
  371. result[constants.MISMATCHED_TESTS_KEY] = mismatched_tests
  372. result[constants.MISMATCHED_TESTS_COUNT_KEY] = len(mismatched_tests)
  373. return result
  374. def _cross_check_tests(self, report: dict):
  375. """
  376. Function to compare our report with the report provided by another test runner. Will perform a comparison and return a list of any tests that failed in the other test runner that did not fail in TIAF, or were not selected.
  377. Returns an empty list if not overloaded by a specialised test impact class.
  378. @param report: Dictionary containing the report provided by TIAF binary
  379. @return: List of tests that failed in test runner but did not fail in TIAF or weren't selected by TIAF.
  380. """
  381. return []
  382. def _compile_s3_top_level_dir_name(self, dir_name: str):
  383. """
  384. Function to build our s3_top_level_dir name. Reads the argument from our dictionary and then appends runtime_type to the end.
  385. If s3_top_level_dir name is not provided in args, we will default to "tiaf"+runtime_type.
  386. @param dir_name: Name of the directory to use as top level when compiling directory name.
  387. @return: Compiled s3_top_level_dir name
  388. """
  389. if dir_name:
  390. dir_name = f"{dir_name}/{self.runtime_type}"
  391. return dir_name
  392. raise SystemError(
  393. "s3_top_level_dir not set while trying to access s3 instance.")
  394. def _extract_test_runs_from_test_run_report(self, report: dict):
  395. """
  396. Extract all test runs from a test run report and store in one list
  397. @param report: The test run report.
  398. """
  399. test_runs = []
  400. test_runs += report[constants.PASSING_TEST_RUNS_KEY]
  401. test_runs += report[constants.FAILING_TEST_RUNS_KEY]
  402. test_runs += report[constants.EXECUTION_FAILURE_TEST_RUNS_KEY]
  403. test_runs += report[constants.TIMED_OUT_TEST_RUNS_KEY]
  404. test_runs += report[constants.UNEXECUTED_TEST_RUNS_KEY]
  405. return test_runs
  406. def _extract_test_runs_from_sequence_report(self, report):
  407. """
  408. Extract all relevant test runs from the report generated by the TIAF runtime.
  409. @param report: The report generated by the TIAF runtime, in dict format.
  410. """
  411. report_type = report[constants.SEQUENCE_TYPE_KEY]
  412. test_runs = self._extract_test_runs_from_test_run_report(
  413. report[constants.SELECTED_TEST_RUN_REPORT_KEY])
  414. if report_type == constants.IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY or report_type == constants.SAFE_IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY:
  415. test_runs = test_runs + \
  416. self._extract_test_runs_from_test_run_report(
  417. report[constants.DRAFTED_TEST_RUN_REPORT_KEY])
  418. if report_type == constants.SAFE_IMPACT_ANALYSIS_SEQUENCE_TYPE_KEY:
  419. test_runs = test_runs + self._extract_test_runs_from_test_run_report(
  420. report[constants.DISCARDED_TEST_RUN_REPORT_KEY])
  421. return test_runs
  422. def run(self):
  423. """
  424. Builds our runtime argument string based on the initialisation state, then executes the runtime with those arguments.
  425. Stores the report of this run locally.
  426. Updates and stores historic data if storage is intialized and source branch is source of truth.
  427. Returns the runtime result as a dictionary.
  428. @return: Runtime results in a dictionary.
  429. """
  430. unpacked_args = " ".join(self._runtime_args)
  431. logger.info(f"Args: {unpacked_args}")
  432. args = [str(self._tiaf_bin)] + self._runtime_args
  433. runtime_result = subprocess.run(args, shell=True)
  434. report = None
  435. # If the sequence completed (with or without failures) we will update the historical meta-data
  436. if runtime_result.returncode == 0 or runtime_result.returncode == 7:
  437. logger.info("Test impact analysis runtime returned successfully.")
  438. # Get the sequence report the runtime generated
  439. with open(self._report_file) as json_file:
  440. report = json.load(json_file)
  441. # Grab the list of failing test targets for this sequence
  442. test_runs = self._extract_test_runs_from_sequence_report(report)
  443. # Attempt to store the historic data and artifacts for this branch and sequence
  444. if self._persistent_storage:
  445. if self._is_source_of_truth_branch:
  446. self._persistent_storage.update_and_store_historic_data(test_runs)
  447. self._persistent_storage.store_artifacts(self._runtime_artifact_directory, self._runtime_coverage_directory)
  448. else:
  449. logger.error(
  450. f"The test impact analysis runtime returned with error: '{runtime_result.returncode}'.")
  451. return self._generate_result(runtime_result.returncode, report)
    @property
    def _is_source_of_truth_branch(self):
        """
        True if the source branch is the source of truth.
        False otherwise.
        """
        return self._source_of_truth_branch == self._src_branch

    @property
    def _has_historic_data(self):
        """
        True if persistent storage is not None and it has historic data.
        False otherwise.
        """
        if self._persistent_storage:
            return self._persistent_storage.has_historic_data
        return False

    @property
    def enabled(self):
        """
        Whether TIAF is enabled, as read from the configuration file.
        """
        return self._enabled

    @property
    def source_branch(self):
        """
        The source branch for this TIAF run.
        """
        return self._src_branch

    @property
    def destination_branch(self):
        """
        The destination branch for this TIAF run.
        """
        return self._dst_branch

    @property
    def destination_commit(self):
        """
        The destination commit for this TIAF run.
        Destination commit is the commit that is being built.
        """
        return self._dst_commit

    @property
    def source_commit(self):
        """
        The source commit for this TIAF run.
        Source commit is the commit that we compare to for PR builds.
        """
        return self._src_commit

    @property
    def runtime_args(self):
        """
        The arguments to be passed to the TIAF runtime.
        """
        return self._runtime_args

    @property
    def has_change_list(self):
        """
        True if a change list has been generated for this TIAF run.
        """
        return self._has_change_list

    @property
    def instance_id(self):
        """
        The instance id of this TestImpact object.
        """
        return self._instance_id

    @property
    def test_suites(self):
        """
        The test suites being executed.
        """
        return self._suites

    @property
    def source_of_truth_branch(self):
        """
        The source of truth branch for this TIAF run.
        """
        return self._source_of_truth_branch
    @property
    @abstractmethod
    def runtime_type(self):
        """
        The runtime this TestImpact supports. Must be implemented by subclass.
        Current options are "native" or "python".
        """
        pass

    # NOTE(review): __init__ reads `self._default_sequence_type` (leading
    # underscore) while this abstract property is `default_sequence_type`;
    # subclasses appear expected to provide the underscored attribute alongside
    # implementing this property (mirroring `_runtime_type`/`runtime_type`) —
    # confirm against the concrete subclasses.
    @property
    @abstractmethod
    def default_sequence_type(self):
        """
        The default sequence type for this TestImpact class. Must be implemented by subclass.
        """
        pass