PKGBUILDs/extra/qt5-webengine/0003-bind-gen-Support-single_process-flag-in-generate_bin.patch

From a3a795afdd386242fa115309c5fd349b0095bfdd Mon Sep 17 00:00:00 2001
From: Yuki Shiino <yukishiino@chromium.org>
Date: Mon, 5 Oct 2020 11:01:57 +0000
Subject: [PATCH 3/4] bind-gen: Support --single_process flag in
generate_bindings.py

Error messages from generate_bindings.py are often hard to read
due to multiprocessing. Support the --single_process flag for
debugging purposes.

Bug: 839389
Change-Id: Ibe0d55716cdc9d5db77b0714dd96c11832ab0143
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2447529
Reviewed-by: Hitoshi Yoshida <peria@chromium.org>
Commit-Queue: Yuki Shiino <yukishiino@chromium.org>
Cr-Commit-Position: refs/heads/master@{#813675}
---
.../bindings/scripts/bind_gen/task_queue.py | 74 +++++++++++--------
.../bindings/scripts/generate_bindings.py | 8 +-
2 files changed, 49 insertions(+), 33 deletions(-)
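
[Packaging note, not part of the upstream commit] The hunks below teach
TaskQueue a single-process mode: when generate_bindings.py is invoked with
--single_process, tasks run inline in the calling process instead of being
dispatched through a multiprocessing.Pool, so failures surface with ordinary
tracebacks. The self-contained Python sketch below only mirrors that dispatch
pattern for illustration; MiniTaskQueue, post_task() and work() are made-up
names, not the real bind_gen API.

import multiprocessing


class MiniTaskQueue(object):
    """Toy stand-in for bind_gen's TaskQueue, mirroring the patched dispatch."""

    def __init__(self, single_process=False):
        self._single_process = single_process
        # With single_process=True no worker pool is created at all.
        self._pool = None if single_process else multiprocessing.Pool()
        self._tasks = []  # list of (func, args)

    def post_task(self, func, *args):
        self._tasks.append((func, args))

    def run(self):
        if self._single_process:
            # Inline execution: exceptions propagate with a plain traceback,
            # which is the debugging benefit --single_process is after.
            for func, args in self._tasks:
                func(*args)
        else:
            results = [self._pool.apply_async(func, args)
                       for func, args in self._tasks]
            self._pool.close()
            self._pool.join()
            for result in results:
                result.get()


def work(name):
    print("processing " + name)


if __name__ == "__main__":
    queue = MiniTaskQueue(single_process=True)  # what --single_process selects
    queue.post_task(work, "window.idl")
    queue.run()

With the patch applied, the real queue is switched the same way via
bind_gen.TaskQueue(single_process=options.single_process) in
generate_bindings.py, as the last hunk shows.
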
diff --git a/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/task_queue.py b/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/task_queue.py
index e666a9b668e..42b96d41688 100644
--- a/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/task_queue.py
+++ b/chromium/third_party/blink/renderer/bindings/scripts/bind_gen/task_queue.py
@@ -14,10 +14,23 @@ class TaskQueue(object):
     tasks will be executed in parallel.
     """
 
-    def __init__(self):
-        self._pool_size = multiprocessing.cpu_count()
-        self._pool = multiprocessing.Pool(self._pool_size,
-                                          package_initializer().init)
+    def __init__(self, single_process=False):
+        """
+        Args:
+            single_process: True makes the instance will not create nor use a
+                child process so that error messages will be easier to read.
+                This is useful for debugging.
+        """
+        assert isinstance(single_process, bool)
+        if single_process:
+            self._single_process = True
+            self._pool_size = 1
+            self._pool = None
+        else:
+            self._single_process = False
+            self._pool_size = multiprocessing.cpu_count()
+            self._pool = multiprocessing.Pool(self._pool_size,
+                                              package_initializer().init)
         self._requested_tasks = []  # List of (func, args, kwargs)
         self._worker_tasks = []  # List of multiprocessing.pool.AsyncResult
         self._did_run = False
@@ -37,28 +50,42 @@ class TaskQueue(object):
         Args:
             report_progress: A callable that takes two arguments, total number
                 of worker tasks and number of completed worker tasks.
-                Scheduled tasks are reorganized into worker tasks, so the
-                number of worker tasks may be different from the number of
-                scheduled tasks.
         """
         assert report_progress is None or callable(report_progress)
         assert not self._did_run
         assert not self._worker_tasks
         self._did_run = True
 
-        num_of_requested_tasks = len(self._requested_tasks)
-        chunk_size = 1
-        i = 0
-        while i < num_of_requested_tasks:
-            tasks = self._requested_tasks[i:i + chunk_size]
-            i += chunk_size
+        if self._single_process:
+            self._run_in_sequence(report_progress)
+        else:
+            self._run_in_parallel(report_progress)
+
+    def _run_in_sequence(self, report_progress):
+        for index, task in enumerate(self._requested_tasks):
+            func, args, kwargs = task
+            report_progress(len(self._requested_tasks), index)
+            func(*args, **kwargs)
+        report_progress(len(self._requested_tasks), len(self._requested_tasks))
+
+    def _run_in_parallel(self, report_progress):
+        for task in self._requested_tasks:
+            func, args, kwargs = task
             self._worker_tasks.append(
-                self._pool.apply_async(_task_queue_run_tasks, [tasks]))
+                self._pool.apply_async(func, args, kwargs))
         self._pool.close()
 
+        def report_worker_task_progress():
+            if not report_progress:
+                return
+            done_count = functools.reduce(
+                lambda count, worker_task: count + bool(worker_task.ready()),
+                self._worker_tasks, 0)
+            report_progress(len(self._worker_tasks), done_count)
+
         timeout_in_sec = 1
         while True:
-            self._report_worker_task_progress(report_progress)
+            report_worker_task_progress()
             for worker_task in self._worker_tasks:
                 if not worker_task.ready():
                     worker_task.wait(timeout_in_sec)
@@ -70,20 +97,3 @@ class TaskQueue(object):
                 break
 
         self._pool.join()
-
-    def _report_worker_task_progress(self, report_progress):
-        assert report_progress is None or callable(report_progress)
-
-        if not report_progress:
-            return
-
-        done_count = functools.reduce(
-            lambda count, worker_task: count + bool(worker_task.ready()),
-            self._worker_tasks, 0)
-        report_progress(len(self._worker_tasks), done_count)
-
-
-def _task_queue_run_tasks(tasks):
-    for task in tasks:
-        func, args, kwargs = task
-        func(*args, **kwargs)
diff --git a/chromium/third_party/blink/renderer/bindings/scripts/generate_bindings.py b/chromium/third_party/blink/renderer/bindings/scripts/generate_bindings.py
index 96a9ebccdbf..528d1b2f61d 100644
--- a/chromium/third_party/blink/renderer/bindings/scripts/generate_bindings.py
+++ b/chromium/third_party/blink/renderer/bindings/scripts/generate_bindings.py
@@ -37,6 +37,12 @@ def parse_options():
type="string",
help='output directory for "modules" component relative '
'to root_gen_dir')
+ parser.add_option(
+ '--single_process',
+ action="store_true",
+ default=False,
+ help=('run everything in a single process, which makes debugging '
+ 'easier'))
options, args = parser.parse_args()
required_option_names = ("web_idl_database", "root_src_dir",
@@ -76,7 +82,7 @@ def main():
                   root_gen_dir=options.root_gen_dir,
                   component_reldirs=component_reldirs)
 
-    task_queue = bind_gen.TaskQueue()
+    task_queue = bind_gen.TaskQueue(single_process=options.single_process)
 
     for task in tasks:
         dispatch_table[task](task_queue)
--
2.33.0