side_by_side.py (7082B)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.


import logging
import os
import sys
from functools import partial

from taskgraph.util.taskcluster import get_artifact, get_task_definition

from ..util.taskcluster import list_task_group_complete_tasks
from .registry import register_callback_action
from .util import create_tasks, fetch_graph_and_labels, get_decision_task_id, get_pushes

logger = logging.getLogger(__name__)


def input_for_support_action(revision, base_revision, base_branch, task):
    """Generate input for action to be scheduled.

    Define what label to schedule with 'label'.
    If it is a test task that uses explicit manifests add that information.

    Args:
        revision (str): the new (head) revision being compared.
        base_revision (str): the revision to compare against.
        base_branch (str): the project/branch the base revision lives on.
        task (dict): the task definition the action was triggered on; its
            metadata name is assumed to look like "<platform>/opt-<test name>"
            (TODO confirm for non-opt variants).

    Returns:
        dict: the input payload for the side-by-side support task.
    """
    platform, test_name = task["metadata"]["name"].split("/opt-")
    # Derive the new branch name from the head repository URL; default keeps
    # the last path component resolving to "try" when the env var is unset.
    new_branch = os.environ.get("GECKO_HEAD_REPOSITORY", "/try").split("/")[-1]
    symbol = task["extra"]["treeherder"]["symbol"]
    # Named `action_input` (not `input`) to avoid shadowing the builtin.
    action_input = {
        "label": "perftest-linux-side-by-side",
        "symbol": symbol,
        "new_revision": revision,
        "base_revision": base_revision,
        "test_name": test_name,
        "platform": platform,
        "base_branch": base_branch,
        "new_branch": new_branch,
    }

    return action_input


def side_by_side_modifier(task, input):
    """Rename and relabel the side-by-side task and fill in its command line.

    Only the task whose label matches ``input["label"]`` is modified; any
    other task is returned unchanged.
    """
    if task.label != input["label"]:
        return task

    # Make side-by-side job searchable by the platform, test name, and revisions
    # it was triggered for
    task.task["metadata"]["name"] = (
        f"{input['platform']} {input['test_name']} "
        f"{input['base_revision'][:12]} {input['new_revision'][:12]}"
    )
    # Use a job symbol to include the symbol of the job the side-by-side
    # is running for
    task.task["extra"]["treeherder"]["symbol"] += f"-{input['symbol']}"

    cmd = task.task["payload"]["command"]
    # The last argument of the second command element carries {placeholders}
    # (revisions, branches, test name, ...) that are substituted here.
    task.task["payload"]["command"][1][-1] = cmd[1][-1].format(**input)

    return task


@register_callback_action(
    title="Side by side",
    name="side-by-side",
    symbol="gen-sxs",
    description=(
        # NOTE: trailing space matters — these adjacent literals are
        # concatenated (the original read "...againstthe pageload job...").
        "Given a performance test pageload job generate a side-by-side comparison against "
        "the pageload job from the revision at the input."
    ),
    order=200,
    context=[{"test-type": "raptor"}],
    schema={
        "type": "object",
        "properties": {
            "revision": {
                "type": "string",
                "default": "",
                "description": "Revision of the push against the comparison is wanted.",
            },
            "project": {
                "type": "string",
                "default": "autoland",
                # Fixed copy-paste error: this previously repeated the
                # `revision` description.
                "description": "Project (branch) of the push against which the comparison is wanted.",
            },
        },
        "additionalProperties": False,
    },
)
def side_by_side_action(parameters, graph_config, input, task_group_id, task_id):
    """
    This action does a side-by-side comparison between current revision and
    the revision entered manually or the latest revision that ran the
    pageload job (via support action).

    To execute this action locally follow the documentation here:
    https://taskcluster-taskgraph.readthedocs.io/en/latest/howto/create-actions.html#testing-actions
    """
    task = get_task_definition(task_id)
    decision_task_id, full_task_graph, label_to_taskid, _ = fetch_graph_and_labels(
        parameters, graph_config
    )
    # TODO: find another way to detect side-by-side comparable jobs
    # (potentially looking at the visual metrics flag)
    if not (
        "browsertime-tp6" in task["metadata"]["name"]
        or "welcome" in task["metadata"]["name"]
    ):
        # logger.error, not logger.exception: there is no active exception
        # here, and Logger.exception outside an `except` block logs a
        # spurious "NoneType: None" traceback.
        logger.error(
            f"Task {task['metadata']['name']} is not side-by-side comparable."
        )
        return

    failed = False
    input_for_action = {}

    if input.get("revision"):
        # If base_revision was introduced manually, use that
        input_for_action = input_for_support_action(
            revision=parameters["head_rev"],
            base_revision=input.get("revision"),
            base_branch=input.get("project"),
            task=task,
        )
    else:
        current_push_id = int(parameters["pushlog_id"]) - 1
        # Go decrementally through pushlog_id, get push data, decision task id,
        # full task graph and everything needed to find which of the past revisions
        # ran the pageload job to compare against
        while int(parameters["pushlog_id"]) - current_push_id < 30:
            pushes = get_pushes(
                project=parameters["head_repository"],
                end_id=current_push_id,
                depth=1,
                full_response=True,
            )
            try:
                # Get label-to-taskid.json artifact + the tasks triggered
                # by the action tasks at a later time than the decision task
                current_decision_task_id = get_decision_task_id(
                    parameters["project"], current_push_id
                )
                current_task_group_id = get_task_definition(current_decision_task_id)[
                    "taskGroupId"
                ]
                current_label_to_taskid = get_artifact(
                    current_decision_task_id, "public/label-to-taskid.json"
                )
                current_full_label_to_taskid = current_label_to_taskid.copy()
                action_task_triggered = list_task_group_complete_tasks(
                    current_task_group_id
                )
                current_full_label_to_taskid.update(action_task_triggered)
                if task["metadata"]["name"] in current_full_label_to_taskid:
                    input_for_action = input_for_support_action(
                        revision=parameters["head_rev"],
                        base_revision=pushes[str(current_push_id)]["changesets"][-1],
                        base_branch=input.get("project", parameters["project"]),
                        task=task,
                    )
                    break
            except Exception:
                # The decision task may have failed; this is common enough
                # that we don't want to report an error for it.
                logger.warning(
                    f"Could not find decision task for push {current_push_id}"
                )
            # BUGFIX: always advance to the previous push. The original code
            # ended the except branch with `continue`, which skipped this
            # decrement and retried the same push forever (infinite loop)
            # whenever its decision task could not be found.
            current_push_id -= 1
        if not input_for_action:
            raise Exception(
                "Could not find a side-by-side comparable task within a depth of 30 revisions."
            )

    try:
        create_tasks(
            graph_config,
            [input_for_action["label"]],
            full_task_graph,
            label_to_taskid,
            parameters,
            decision_task_id,
            modifier=partial(side_by_side_modifier, input=input_for_action),
        )
    except Exception as e:
        logger.exception(f"Failed to trigger action: {e}.")
        failed = True

    if failed:
        sys.exit(1)