Preview: thread.py (5.38 KB)
/lib64/python3.6/concurrent/futures/thread.py

# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ThreadPoolExecutor."""

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

import atexit
from concurrent.futures import _base
import itertools
import queue
import threading
import weakref
import os

# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
#   - The workers would still be running during interpreter shutdown,
#     meaning that they would fail in unpredictable ways.
#   - The workers could be killed while evaluating a work item, which could
#     be bad if the callable being evaluated has external side-effects e.g.
#     writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.

_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False

def _python_exit():
    global _shutdown
    _shutdown = True
    items = list(_threads_queues.items())
    for t, q in items:
        q.put(None)
    for t, q in items:
        t.join()

atexit.register(_python_exit)

class _WorkItem(object):
    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def run(self):
        if not self.future.set_running_or_notify_cancel():
            return

        try:
            result = self.fn(*self.args, **self.kwargs)
        except BaseException as exc:
            self.future.set_exception(exc)
            # Break a reference cycle with the exception 'exc'
            self = None
        else:
            self.future.set_result(result)

def _worker(executor_reference, work_queue):
    try:
        while True:
            work_item = work_queue.get(block=True)
            if work_item is not None:
                work_item.run()
                # Delete references to object. See issue16284
                del work_item
                continue
            executor = executor_reference()
            # Exit if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns the worker has been collected OR
            #   - The executor that owns the worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown:
                # Notify other workers so they also exit
                work_queue.put(None)
                return
            del executor
    except BaseException:
        _base.LOGGER.critical('Exception in worker', exc_info=True)

class ThreadPoolExecutor(_base.Executor):

    # Used to assign unique thread names when thread_name_prefix is not supplied.
    _counter = itertools.count().__next__

    def __init__(self, max_workers=None, thread_name_prefix=''):
        """Initializes a new ThreadPoolExecutor instance.

        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
            thread_name_prefix: An optional name prefix to give our threads.
        """
        if max_workers is None:
            # Use this number because ThreadPoolExecutor is often
            # used to overlap I/O instead of CPU work.
            max_workers = (os.cpu_count() or 1) * 5
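            # Illustration (an assumption, not stdlib text): on a 4-core
            # machine the default is 4 * 5 = 20 threads; if os.cpu_count()
            # returns None, the "or 1" fallback yields 5 threads.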
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")

        self._max_workers = max_workers
        self._work_queue = queue.Queue()
        self._threads = set()
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        self._thread_name_prefix = (thread_name_prefix or
                                    ("ThreadPoolExecutor-%d" % self._counter()))

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def _adjust_thread_count(self):
        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.
        def weakref_cb(_, q=self._work_queue):
            q.put(None)
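        # Binding the queue as a default argument avoids capturing ``self`` in
        # the closure; a strong reference here would keep the executor alive,
        # so the weakref callback would never fire.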
        # TODO(bquinlan): Should avoid creating new threads if there are more
        # idle threads than items in the work queue.
        num_threads = len(self._threads)
        if num_threads < self._max_workers:
            thread_name = '%s_%d' % (self._thread_name_prefix or self,
                                     num_threads)
            t = threading.Thread(name=thread_name, target=_worker,
                                 args=(weakref.ref(self, weakref_cb),
                                       self._work_queue))
            t.daemon = True
            t.start()
            self._threads.add(t)
            _threads_queues[t] = self._work_queue

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown = True
            self._work_queue.put(None)
        if wait:
            for t in self._threads:
                t.join()
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
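
A minimal usage sketch (not part of thread.py; the task function and names are
illustrative assumptions) showing how the pieces above cooperate: submit() wraps
the callable in a _WorkItem and enqueues it, _adjust_thread_count() spawns a
daemon worker, and the with-block triggers shutdown(wait=True), which posts the
None sentinel and joins the workers.

from concurrent.futures import ThreadPoolExecutor, as_completed

def square(n):
    # Stand-in task; real work would typically be I/O-bound (see the
    # max_workers default above).
    return n * n

with ThreadPoolExecutor(max_workers=4, thread_name_prefix='demo') as pool:
    futures = [pool.submit(square, n) for n in range(8)]
    for fut in as_completed(futures):
        print(fut.result())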
