Use an asyncio.Event instead of an asyncio.Lock for global rate limits.

There were some deadlocking issues that I suspect were due to the
way the global rate limit was handled. This changes it to a simple
Event that lets multiple coroutines pass through at once instead of
one by one.
Rapptz 2017-04-01 23:26:44 -04:00
parent af0ba17885
commit ff95258710
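
The change swaps mutual exclusion for a broadcast-style gate. With a Lock, `yield from self._global_lock` doesn't just wait: the waiter acquires the lock when it is woken, and since the request path never releases it, everything queued behind that waiter stalls, which is presumably the deadlock the message refers to. An Event has no ownership: wait() simply parks the coroutine, and one set() call wakes every waiter. Below is a minimal, self-contained sketch of that gate pattern rather than discord.py's actual code; the GlobalGate class and its method names are invented for illustration, and it uses modern async/await in place of the yield from coroutines shown in the diff.

import asyncio


class GlobalGate:
    """Illustrative stand-in for the fields this commit touches on HTTPClient."""

    def __init__(self):
        # The gate starts open, so the common case is a cheap is_set() check.
        self._global_over = asyncio.Event()
        self._global_over.set()

    async def request(self, n):
        if not self._global_over.is_set():
            # wait() takes no ownership: any number of coroutines can park
            # here and all of them resume once set() is called.
            await self._global_over.wait()
        print('request', n, 'proceeding')

    async def on_global_429(self, retry_after):
        # Close the gate, sleep out the retry window, then reopen it.
        self._global_over.clear()
        await asyncio.sleep(retry_after)
        self._global_over.set()
        print('global rate limit over')


async def main():
    gate = GlobalGate()
    # Simulate hitting the global rate limit, then issuing several requests
    # that must all wait for the same window to pass.
    limiter = asyncio.create_task(gate.on_global_429(0.1))
    await asyncio.sleep(0)  # let the limiter task close the gate first
    await asyncio.gather(*(gate.request(i) for i in range(3)))
    await limiter


asyncio.run(main())
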


@@ -92,7 +92,8 @@ class HTTPClient:
         self.connector = connector
         self._session = aiohttp.ClientSession(connector=connector, loop=self.loop)
         self._locks = weakref.WeakValueDictionary()
-        self._global_lock = asyncio.Lock(loop=self.loop)
+        self._global_over = asyncio.Event(loop=self.loop)
+        self._global_over.set()
         self.token = None
         self.bot_token = False
@@ -126,9 +127,9 @@
         kwargs['headers'] = headers

-        if self._global_lock.locked():
+        if not self._global_over.is_set():
             # wait until the global lock is complete
-            yield from self._global_lock
+            yield from self._global_over.wait()

         yield from lock
         with MaybeUnlock(lock) as maybe_lock:
@@ -172,15 +173,16 @@
                     is_global = data.get('global', False)
                     if is_global:
                         log.info('Global rate limit has been hit. Retrying in {:.2} seconds.'.format(retry_after))
-                        # acquire the global lock and block all processing
-                        yield from self._global_lock
+                        self._global_over.clear()

                     yield from asyncio.sleep(retry_after, loop=self.loop)
                     log.debug('Done sleeping for the rate limit. Retrying...')

+                    # release the global lock now that the
+                    # global rate limit has passed
                     if is_global:
-                        self._global_lock.release()
+                        self._global_over.set()
                         log.debug('Global rate limit is now over.')

                     continue
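
Two details in the new version are worth noting. The Event is set in __init__, so the gate is open by default and the common case costs only an is_set() check. And because clear() happens before the retry_after sleep while set() happens after it, every request that queued up during the global window is released together once it ends, rather than trickling through one at a time as they would behind a Lock. (The loop= keyword arguments reflect the asyncio API of this era; later Python versions deprecated and then removed them.)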