blob: b1d741ea89c16fd6294cc5ae5f1f2266aebbaeab [file] [log] [blame]
Milosz Wasilewski705ae7c2021-01-14 14:21:39 +00001import csv
2import logging
3import multiprocessing
4import requests
5import statistics
6import traceback
7from datetime import datetime, timedelta
8from multiprocessing import Pool, Lock
9from urllib.parse import urljoin, urlsplit
10
11
# multiprocessing.log_to_stderr() returns the multiprocessing logger with a
# StreamHandler to stderr already attached.  The original code added a second
# StreamHandler on top, which emitted every record twice; instead we apply the
# desired format/level to the handler(s) log_to_stderr installed.
logger = multiprocessing.log_to_stderr()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
for handler in logger.handlers:
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(formatter)
19
def process_testjob_list(testjob_list, lava_auth, build, writer_queue, global_writer_queue, build_queue):
    """Fetch LAVA job details for every test job of *build* and compute
    wait-time / execution-time statistics.

    One record per finished job is pushed to ``build_queue``; the aggregate
    row for the whole build is pushed to ``global_writer_queue`` and also
    returned ({} when no job yielded usable timing data).

    ``writer_queue`` is unused but kept for backward compatibility — callers
    pass the same queue for it and for ``build_queue``.

    Relies on module-level globals ``now``, ``backend_dict`` and ``NullAuth``.
    """
    # get job details from LAVA
    print("starting fetch for build %s" % build['version'])
    print(len(testjob_list))
    wait_time_list = []
    execution_time_list = []
    job_priority_list = []
    first_job_submission_time = now  # module-level "start of script" timestamp
    last_job_end_time = datetime.strptime(build['created_at'], "%Y-%m-%dT%H:%M:%S.%fZ")
    failed_jobs = 0
    for testjob in testjob_list:
        # count jobs that did not complete successfully
        if testjob['job_status'] != "Complete":
            failed_jobs += 1
        # get testjob backend and ask for details
        # assume all backends are LAVA type
        backend = backend_dict.get(testjob['backend'].rsplit("/", 2)[1])
        if backend is None:
            continue
        backend_url_parts = urlsplit(backend['url'])
        backend_rest_url = "%s://%s/api/v0.2/" % (backend_url_parts.scheme, backend_url_parts.netloc)
        job_details_request = requests.get(
            urljoin(backend_rest_url, "jobs/%s/" % testjob['job_id']),
            auth=NullAuth(),
            headers=lava_auth
        )
        if job_details_request.status_code != 200:
            continue
        job_details = job_details_request.json()
        # A job that is still running (or never started) has no usable timing
        # data; the original code parsed end_time unconditionally and crashed
        # with TypeError when it was None.
        if not (job_details['start_time'] and job_details['end_time']):
            continue
        submit_time = datetime.strptime(job_details['submit_time'], "%Y-%m-%dT%H:%M:%S.%fZ")
        start_time = datetime.strptime(job_details['start_time'], "%Y-%m-%dT%H:%M:%S.%fZ")
        end_time = datetime.strptime(job_details['end_time'], "%Y-%m-%dT%H:%M:%S.%fZ")
        wait_time = start_time - submit_time
        wait_time_list.append(wait_time.total_seconds())
        execution_time = end_time - start_time
        execution_time_list.append(execution_time.total_seconds())
        job_priority_list.append(job_details['priority'])
        # track the earliest submission and the latest finish over all jobs
        if first_job_submission_time > submit_time:
            first_job_submission_time = submit_time
        if last_job_end_time < end_time:
            last_job_end_time = end_time
        build_queue.put(
            {
                'build_version': build['version'],
                'job_id': testjob['job_id'],
                'priority': job_details['priority'],
                'submit_time': submit_time,
                'wait_time': wait_time,
                'execution_time': execution_time
            }
        )
    row = {}
    if wait_time_list:
        row = {
            'date': datetime.strptime(build['created_at'], "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y-%m-%d %H:%M:%S"),
            'build version': build['version'],
            'no of jobs': len(testjob_list),
            'no of failed jobs': failed_jobs,
            'avg job priority': statistics.mean(job_priority_list),
            'avg wait time': statistics.mean(wait_time_list),
            'avg execution time': statistics.mean(execution_time_list),
            'total execution time': (last_job_end_time - first_job_submission_time).total_seconds()
        }
        # Only forward non-empty rows; the original put {} as well, which made
        # the global listener write blank lines into total.csv.
        global_writer_queue.put(row)
    return row
88
89
def listener_process(queue, filename="total.csv"):
    """Consume aggregate build rows from *queue* and write them to *filename*.

    Runs as a separate process.  Stops when it receives ``None`` (the sentinel
    pushed by the main process after all workers have joined).  Uses the
    module-level global ``fieldnames`` as the CSV header.

    *filename* defaults to the previously hard-coded ``total.csv`` so existing
    callers are unaffected.
    """
    # newline='' is required when handing a file to the csv module; without it
    # every row is followed by a blank line on Windows.
    with open(filename, "w", newline='') as global_csv_stats:
        global_writer = csv.DictWriter(global_csv_stats, fieldnames)
        global_writer.writeheader()
        while True:
            try:
                record = queue.get()
                if record is None:  # We send this as a sentinel to tell the listener to quit.
                    break
                global_writer.writerow(record)
            except Exception:
                # Best effort: report the failure and keep draining the queue
                # so producers never block on a dead listener.
                import sys
                print('Whoops! Problem:', file=sys.stderr)
                traceback.print_exc(file=sys.stderr)
104
105
def build_listener_process(build_name, queue):
    """Consume per-job records from *queue* and write them to ``<build_name>.csv``.

    Runs as a separate process, one per project.  Stops when it receives
    ``None`` (the sentinel pushed by the main process).
    """
    # newline='' is required when handing a file to the csv module; without it
    # every row is followed by a blank line on Windows.
    with open("%s.csv" % (build_name), "w", newline='') as csv_file:
        fieldnames = ['build_version', 'job_id', 'priority', 'submit_time', 'wait_time', 'execution_time']
        writer = csv.DictWriter(csv_file, fieldnames)
        writer.writeheader()
        while True:
            try:
                record = queue.get()
                if record is None:  # We send this as a sentinel to tell the listener to quit.
                    break
                writer.writerow(record)
            except Exception:
                # Best effort: report the failure and keep draining the queue
                # so producers never block on a dead listener.
                import sys
                print('Whoops! Problem:', file=sys.stderr)
                traceback.print_exc(file=sys.stderr)
121
122
def log_results(results):
    """Worker callback: echo *results* to stdout and to the debug log."""
    for emit in (print, logger.debug):
        emit(results)
126
127# if wait_time_list:
128# with writer_lock:
129# writer.writerow({
130# 'date': datetime.strptime(build['created_at'], "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y-%m-%d %H:%M:%S"),
131# 'build version': build['version'],
132# 'no of jobs': len(testjob_list),
133# 'no of failed jobs': failed_jobs,
134# 'avg job priority': statistics.mean(job_priority_list),
135# 'avg wait time': statistics.mean(wait_time_list),
136# 'avg execution time':statistics.mean(execution_time_list),
137# 'total execution time': (last_job_end_time - first_job_submission_time).total_seconds()
138# })
139# with global_writer_lock:
140# global_writer.writerow({
141# 'date': datetime.strptime(build['created_at'], "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y-%m-%d %H:%M:%S"),
142# 'build version': build['version'],
143# 'no of jobs': len(testjob_list),
144# 'no of failed jobs': failed_jobs,
145# 'avg job priority': statistics.mean(job_priority_list),
146# 'avg wait time': statistics.mean(wait_time_list),
147# 'avg execution time':statistics.mean(execution_time_list),
148# 'total execution time': (last_job_end_time - first_job_submission_time).total_seconds()
149# })
150
151# create CSV with the following format
152# date, build version, # of jobs, # of failed jobs, avg wait time, avg execution time, time from first submission to last result
153
# Column order for the per-build summary CSV (total.csv); this list is the
# DictWriter header used by listener_process.
fieldnames = [
    'date', 'build version',
    'no of jobs', 'no of failed jobs',
    'avg job priority',
    'avg wait time', 'avg execution time', 'total execution time',
]
164
class NullAuth(requests.auth.AuthBase):
    """Authentication hook that forces requests to ignore ``.netrc``.

    Some sites reject regular authentication while credentials are still kept
    in ``.netrc`` for form submission.  Left to itself, requests would pick up
    the ``.netrc`` entry and, on such sites, trigger a 401.  Passing
    ``auth=NullAuth()`` disables that lookup.

    Usage::

        requests.get(url, auth=NullAuth())

    Copied from: https://github.com/psf/requests/issues/2773#issuecomment-174312831
    """

    def __call__(self, r):
        # No-op: hand the prepared request back unchanged.
        return r
182
183
# "group/project" slugs of Android LKFT projects on qa-reports.  Not
# referenced below — project_list is set to lkft_list instead; kept so the
# script can be switched back to analysing the Android projects.
android_list = [
    "android-lkft/4.14-10.0-gsi-hikey",
    "android-lkft/4.14-10.0-gsi-hikey960",
    "android-lkft/4.14-master-hikey",
    "android-lkft/4.14-master-hikey960",
    "android-lkft/4.14-master-hikey960-lkft",
    "android-lkft/4.14-master-hikey-lkft",
    "android-lkft/4.14p-10.0-gsi-hikey",
    "android-lkft/4.14p-10.0-gsi-hikey960",
    "android-lkft/4.14-stable-master-hikey960-lkft",
    "android-lkft/4.14-stable-master-hikey-lkft",
    "android-lkft/4.19-10.0-gsi-hikey",
    "android-lkft/4.19-10.0-gsi-hikey960",
    "android-lkft/4.19-9.0-hikey960-auto",
    "android-lkft/4.19-9.0-hikey-auto",
    "android-lkft/4.19-master-hikey",
    "android-lkft/4.19-master-hikey960",
    "android-lkft/4.19-master-hikey960-lkft",
    "android-lkft/4.19-master-hikey-lkft",
    "android-lkft/4.19-stable-master-hikey960-lkft",
    "android-lkft/4.19-stable-master-hikey-lkft",
    "android-lkft/4.4o-10.0-gsi-hikey",
    "android-lkft/4.4o-9.0-lcr-hikey",
    "android-lkft/4.4p-10.0-gsi-hikey",
    "android-lkft/4.4p-rc-10.0-gsi-hikey",
    "android-lkft/4.4p-rc-9.0-hikey",
    "android-lkft/4.9-10.0-gsi-hikey",
    "android-lkft/4.9-10.0-gsi-hikey960",
    "android-lkft/4.9o-10.0-gsi-hikey",
    "android-lkft/4.9o-10.0-gsi-hikey960",
    "android-lkft/4.9o-9.0-lcr-hikey",
    "android-lkft/4.9p-10.0-gsi-hikey",
    "android-lkft/4.9p-10.0-gsi-hikey960",
]
218
# "group/project" slugs of LKFT projects to analyse; branches no longer of
# interest are commented out rather than removed so they can be re-enabled.
lkft_list = [
# "warp7/warp7-bsp",
# "lkft/linux-stable-rc-linux-4.4.y",
# "lkft/linux-stable-rc-linux-4.4.y-sanity",
# "lkft/linux-stable-rc-linux-4.9.y",
# "lkft/linux-stable-rc-linux-4.9.y-sanity",
# "lkft/linux-stable-rc-linux-4.14.y",
# "lkft/linux-stable-rc-linux-4.14.y-sanity",
# "lkft/linux-stable-rc-linux-4.19.y",
# "lkft/linux-stable-rc-linux-4.19.y-sanity",
    "lkft/linux-stable-rc-linux-5.4.y",
    "lkft/linux-stable-rc-linux-5.4.y-sanity",
    "lkft/linux-stable-rc-linux-5.8.y",
    "lkft/linux-stable-rc-linux-5.8.y-sanity"
]
234
# Project set to analyse (android_list above is the alternative).
project_list = lkft_list

# NOTE(review): API tokens hardcoded in source — these should be revoked and
# loaded from the environment or a credentials file instead.
# Token for the qa-reports API.
auth = {
    "Authorization": "Token 86dda92be75dd6929b97e3bce4d4f660afedd0dd"
}

# Token for the LAVA REST API.
lava_auth = {
    "Authorization": "Token d3xjq5twg9hs562bcqpiezgx1yknrfp4ojhf0470hga0cg3k8qu1bndf3mzwbbu02us5e3zxbor8m2ezwst7405hk4ob4n6nurk71el1nmb10699lo8lszozzkqxaqp8"
}

qa_reports_url = "https://qa-reports.linaro.org/api/"
# NOTE(review): naive local time, later compared against timestamps parsed
# from API strings — confirm both are in the same timezone.
now = datetime.now()
delta = timedelta(weeks=26)  # look-back window (~6 months)
248
# Fetch the list of backends from qa-reports and build an id -> backend map,
# used later to resolve a test job's backend URL to its LAVA instance.
backend_dict = {}
backend_list_request = requests.get(
    urljoin(qa_reports_url, "backends/"),
    auth=NullAuth(),
    headers=auth
)
if backend_list_request.status_code == 200:
    backend_list = backend_list_request.json()['results']
    backend_dict = {str(entry['id']): entry for entry in backend_list}
260
261
# Queue carrying one aggregate row per build; consumed by listener_process,
# which writes total.csv.
global_writer_queue = multiprocessing.Queue()

workers = []        # per-build worker processes
writers = []        # per-project CSV writer processes
writer_queues = []  # queues feeding the per-project writers
#pool = Pool()

# Start the total.csv listener before any worker can produce rows.
listener = multiprocessing.Process(target=listener_process, args=(global_writer_queue,))
listener.start()
271
# Main driver: for each project, page through its builds on qa-reports, start
# one per-project CSV writer process, then fork one worker per build to fetch
# job timing data from LAVA.
for project in project_list:
    print(project)
    group_slug, project_slug = project.split("/")
    query_params = {
        "project__slug": project_slug,
        "project__group__slug": group_slug,
        "created_at__gte": now-delta,
        "ordering": "created_at"
    }
    # get list of builds for last 6 months
    build_list_request = requests.get(
        urljoin(qa_reports_url, "builds"),
        params=query_params,
        headers=auth,
        auth=NullAuth()
    )
    build_list = []
    if build_list_request.status_code == 200:
        build_list = build_list_request.json()['results']
    else:
        print(build_list_request.status_code)
        print(build_list_request.text)
    if not build_list:
        # no builds, go to the next project
        continue
    # Follow API pagination via the 'next' URL until exhausted.
    # NOTE(review): a non-200 page rebinds build_list_request to the error
    # response; the next .json()['next'] lookup may then fail — verify.
    while build_list_request.json()['next']:
        build_list_request = requests.get(
            build_list_request.json()['next'],
            headers=auth,
            auth=NullAuth()
        )
        if build_list_request.status_code == 200:
            build_list = build_list + build_list_request.json()['results']

    # One writer process (and queue) per project; its CSV is named
    # "<group>_<project>.csv".
    writer_queue = multiprocessing.Queue()
    writer_queues.append(writer_queue)
    writer_listener = multiprocessing.Process(target=build_listener_process, args=("%s_%s" % (group_slug, project_slug), writer_queue,))
    writer_listener.start()
    writers.append(writer_listener)
    # for each build, get list of LAVA jobs
    for build in build_list:
        testjob_list_request = requests.get(
            build['testjobs'],
            auth=NullAuth(),
            headers=auth
        )
        testjob_list = []
        if testjob_list_request.status_code == 200:
            testjob_list = testjob_list_request.json()['results']
        # NOTE(review): on ConnectionError the same 'next' URL is retried on
        # the next iteration; a permanently unreachable host would make this
        # loop spin forever — confirm this retry-forever behavior is intended.
        while testjob_list_request.json()['next']:
            try:
                testjob_list_request = requests.get(
                    testjob_list_request.json()['next'],
                    auth=NullAuth(),
                    headers=auth
                )
                if testjob_list_request.status_code == 200:
                    testjob_list = testjob_list + testjob_list_request.json()['results']
            except requests.exceptions.ConnectionError:
                pass
        logger.debug("processing jobs for: %s" % build['version'])
        #p = pool.apply_async(process_testjob_list, [testjob_list, lava_auth, build, writer_queue, global_writer_queue], callback=log_results)
        # writer_queue is passed twice: once for the unused writer_queue
        # parameter and once as build_queue, the queue the worker writes to.
        w = multiprocessing.Process(target=process_testjob_list, args=(testjob_list, lava_auth, build, writer_queue, global_writer_queue, writer_queue))
        workers.append(w)
        w.start()
337
#global_csv_stats.close()
#pool.close()
#pool.join()
# NOTE(review): Queue.qsize() raises NotImplementedError on macOS — confirm
# the script only targets platforms where it works.
print(global_writer_queue.qsize())
# Wait for every per-build worker to finish producing records.
for w in workers:
    w.join()
# Then tell each per-project writer to stop (None sentinel) and let it flush.
for wq in writer_queues:
    wq.put(None)
for w in writers:
    w.join()
# Finally shut down the total.csv listener the same way.
global_writer_queue.put(None)
listener.join()
350