github: support paging for use_max_tag

lilydjwg 2018-05-04 11:46:26 +08:00
parent 081266e807
commit 23e9464cda


@@ -3,6 +3,7 @@
 import os
 import re
+from functools import partial
 
 import structlog
@@ -41,28 +42,65 @@ async def get_version(name, conf):
   kwargs = {}
   if conf.get('proxy'):
     kwargs["proxy"] = conf.get("proxy")
 
+  if use_max_tag:
+    return await max_tag(partial(
+      session.get, headers=headers, **kwargs),
+      url, name, ignored_tags, include_tags_pattern,
+      sort_version_key,
+    )
+
   async with session.get(url, headers=headers, **kwargs) as res:
     data = await res.json()
 
   if use_latest_release:
     if 'tag_name' not in data:
       logger.error('No tag found in upstream repository.',
                    name=name)
       return
     version = data['tag_name']
-  elif use_max_tag:
-    data = [tag["name"] for tag in data if tag["name"] not in ignored_tags]
-    if include_tags_pattern:
-      data = [x for x in data
-              if re.search(include_tags_pattern, x)]
-    data.sort(key=sort_version_key)
-    if not len(data):
-      logger.error('No tag found in upstream repository.',
-                   name=name,
-                   include_tags_pattern=include_tags_pattern)
-      return
-    version = data[-1]
   else:
     # YYYYMMDD.HHMMSS
     version = data[0]['commit']['committer']['date'] \
       .rstrip('Z').replace('-', '').replace(':', '').replace('T', '.')
   return version
+
+
+async def max_tag(
+  getter, url, name,
+  ignored_tags, include_tags_pattern, sort_version_key,
+):
+  # paging is needed: the tags matching our filters may not be on the
+  # first page the GitHub API returns
+  while True:
+    async with getter(url) as res:
+      links = res.headers.get('Link')
+      data = await res.json()
+
+    data = [tag["name"] for tag in data if tag["name"] not in ignored_tags]
+    if include_tags_pattern:
+      data = [x for x in data
+              if re.search(include_tags_pattern, x)]
+    if data:
+      data.sort(key=sort_version_key)
+      return data[-1]
+    else:
+      next_page_url = get_next_page_url(links)
+      if not next_page_url:
+        break
+      else:
+        url = next_page_url
+
+  logger.error('No tag found in upstream repository.',
+               name=name,
+               include_tags_pattern=include_tags_pattern)
+  return
+
+
+def get_next_page_url(links):
+  links = links.split(', ')
+  next_link = [x for x in links if x.endswith('rel="next"')]
+  if not next_link:
+    return
+  return next_link[0].split('>', 1)[0][1:]
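
For reference, the GitHub API paginates list endpoints and advertises further pages through the Link response header, which is what get_next_page_url picks apart. A runnable sketch of that parsing on a made-up header value follows; the URLs are illustrative, and the `if not links` guard is an extra added here for the case where GitHub sends no Link header at all (the commit's version assumes the header is present whenever another page is asked for):

  def get_next_page_url(links):
    if not links:  # no Link header means there is only one page
      return
    # Link: <https://...?page=2>; rel="next", <https://...?page=5>; rel="last"
    links = links.split(', ')
    next_link = [x for x in links if x.endswith('rel="next"')]
    if not next_link:
      return
    # strip the "<" and ">" wrapped around the URL
    return next_link[0].split('>', 1)[0][1:]

  header = ('<https://api.github.com/repositories/123/tags?page=2>; rel="next", '
            '<https://api.github.com/repositories/123/tags?page=5>; rel="last"')
  print(get_next_page_url(header))
  # https://api.github.com/repositories/123/tags?page=2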
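
The loop in max_tag is the general shape for walking any paginated GitHub listing: fetch a page through an injected getter (built with functools.partial so the headers and proxy settings are bound once in get_version), stop as soon as a page yields a matching tag, otherwise follow rel="next" until the pages run out. Below is a self-contained sketch of the same pattern with aiohttp, outside nvchecker; the repository URL, Accept header, and match predicate are illustrative, and plain max() stands in for nvchecker's sort_version_key:

  import asyncio
  from functools import partial

  import aiohttp

  def get_next_page_url(links):
    if not links:
      return
    links = links.split(', ')
    next_link = [x for x in links if x.endswith('rel="next"')]
    if not next_link:
      return
    return next_link[0].split('>', 1)[0][1:]

  async def max_matching_tag(getter, url, match):
    # same shape as max_tag: page through tags until one page matches
    while True:
      async with getter(url) as res:
        links = res.headers.get('Link')
        data = await res.json()
      names = [tag['name'] for tag in data if match(tag['name'])]
      if names:
        return max(names)  # lexical max; real code would sort by version
      url = get_next_page_url(links)
      if not url:
        return  # exhausted all pages without a match

  async def main():
    url = 'https://api.github.com/repos/lilydjwg/nvchecker/tags'
    async with aiohttp.ClientSession() as session:
      # partial pre-binds the headers, mirroring the getter handed to max_tag
      getter = partial(session.get,
                       headers={'Accept': 'application/vnd.github.v3+json'})
      print(await max_matching_tag(getter, url,
                                   lambda name: name.startswith('v')))

  asyncio.run(main())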