Recover more quickly from failures by default
This commit is contained in:
parent
474e8656e2
commit
786ced5c24
1 changed file with 8 additions and 2 deletions
10
cry/feed.py
10
cry/feed.py
|
|
@ -148,7 +148,10 @@ async def fetch_feed(
|
|||
return (None, feed)
|
||||
|
||||
if time.time() < feed.retry_after_ts:
|
||||
LOG.info(f"{feed.url} will not be pulled until {feed.retry_after_ts}")
|
||||
retry_str = time.strftime(
|
||||
"%Y-%m-%d %H:%M:%S %z", time.localtime(feed.retry_after_ts)
|
||||
)
|
||||
LOG.info(f"{feed.url} will not be pulled until {retry_str}")
|
||||
return (None, feed)
|
||||
|
||||
# We waffle back and forth about using feedreader's HTTP support vs
|
||||
|
|
@ -213,7 +216,10 @@ async def fetch_feed(
|
|||
except Exception:
|
||||
pass
|
||||
if retry_delta is None:
|
||||
retry_delta = 60 * 60 # 1 hour default
|
||||
if failed:
|
||||
retry_delta = 1 * 60 # Retry again in a minute
|
||||
else:
|
||||
retry_delta = 60 * 60 # 1 hour default
|
||||
|
||||
feed = dataclasses.replace(feed, retry_after_ts=int(time.time()) + retry_delta)
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue