Dataset columns:

original_string    string      (length 75 – 104k)
code_tokens        sequence    (length 19 – 28.4k)
docstring_tokens   sequence    (length 1 – 1.97k)
repo               string      (length 7 – 55)
code               string      (length 75 – 104k)
docstring          string      (length 1 – 46.9k)
func_name          string      (length 1 – 134)
path               string      (length 4 – 223)
partition          string      (3 values)
language           string      (1 value)
url                string      (length 87 – 315)
sha                string      (length 40 – 40)
def get_vid_from_url(url):
    """Extracts video ID from URL.
    """
    return match1(url, r'youtu\.be/([^?/]+)') or \
      match1(url, r'youtube\.com/embed/([^/?]+)') or \
      match1(url, r'youtube\.com/v/([^/?]+)') or \
      match1(url, r'youtube\.com/watch/([^/?]+)') or \
      parse_query_param(url, 'v') or \
      parse_query_param(parse_query_param(url, 'u'), 'v')
[ "def", "get_vid_from_url", "(", "url", ")", ":", "return", "match1", "(", "url", ",", "r'youtu\\.be/([^?/]+)'", ")", "or", "match1", "(", "url", ",", "r'youtube\\.com/embed/([^/?]+)'", ")", "or", "match1", "(", "url", ",", "r'youtube\\.com/v/([^/?]+)'", ")", "or", "match1", "(", "url", ",", "r'youtube\\.com/watch/([^/?]+)'", ")", "or", "parse_query_param", "(", "url", ",", "'v'", ")", "or", "parse_query_param", "(", "parse_query_param", "(", "url", ",", "'u'", ")", ",", "'v'", ")" ]
[ "Extracts", "video", "ID", "from", "URL", "." ]
soimort/you-get
Extracts video ID from URL.
YouTube.get_vid_from_url
src/you_get/extractors/youtube.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/youtube.py#L135-L143
b746ac01c9f39de94cac2d56f665285b0523b974
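The cascade above leans on you-get's match1 and parse_query_param helpers. A minimal self-contained sketch of the same lookup, with illustrative stand-ins for those two helpers (the stubs below are assumptions, not the library's implementations):

import re
from urllib.parse import urlparse, parse_qs

def match1(text, pattern):
    # First capture group of the first match, or None.
    m = re.search(pattern, text or '')
    return m.group(1) if m else None

def parse_query_param(url, param):
    # Value of ?param=... in a URL, or None.
    return parse_qs(urlparse(url or '').query).get(param, [None])[0]

assert match1('https://youtu.be/abc123', r'youtu\.be/([^?/]+)') == 'abc123'
assert parse_query_param('https://www.youtube.com/watch?v=abc123', 'v') == 'abc123'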
def sina_xml_to_url_list(xml_data):
    """str->list
    Convert XML to URL List.
    From Biligrab.
    """
    rawurl = []
    dom = parseString(xml_data)
    for node in dom.getElementsByTagName('durl'):
        url = node.getElementsByTagName('url')[0]
        rawurl.append(url.childNodes[0].data)
    return rawurl
[ "def", "sina_xml_to_url_list", "(", "xml_data", ")", ":", "rawurl", "=", "[", "]", "dom", "=", "parseString", "(", "xml_data", ")", "for", "node", "in", "dom", ".", "getElementsByTagName", "(", "'durl'", ")", ":", "url", "=", "node", ".", "getElementsByTagName", "(", "'url'", ")", "[", "0", "]", "rawurl", ".", "append", "(", "url", ".", "childNodes", "[", "0", "]", ".", "data", ")", "return", "rawurl" ]
[ "str", "-", ">", "list", "Convert", "XML", "to", "URL", "List", ".", "From", "Biligrab", "." ]
soimort/you-get
str->list Convert XML to URL List. From Biligrab.
sina_xml_to_url_list
src/you_get/extractors/miomio.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/miomio.py#L41-L51
b746ac01c9f39de94cac2d56f665285b0523b974
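A quick check of the <durl>/<url> traversal against a hand-made XML snippet (parseString is presumably xml.dom.minidom's, which matches the call here):

from xml.dom.minidom import parseString

xml_data = ('<video><durl><url>http://example.com/1.flv</url></durl>'
            '<durl><url>http://example.com/2.flv</url></durl></video>')
dom = parseString(xml_data)
rawurl = [node.getElementsByTagName('url')[0].childNodes[0].data
          for node in dom.getElementsByTagName('durl')]
assert rawurl == ['http://example.com/1.flv', 'http://example.com/2.flv']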
def makeMimi(upid):
    """From http://cdn37.atwikiimg.com/sitescript/pub/dksitescript/FC2.site.js
    Also com.hps.util.fc2.FC2EncrptUtil.makeMimiLocal
    L110"""
    strSeed = "gGddgPfeaf_gzyr"
    prehash = upid + "_" + strSeed
    return md5(prehash.encode('utf-8')).hexdigest()
[ "def", "makeMimi", "(", "upid", ")", ":", "strSeed", "=", "\"gGddgPfeaf_gzyr\"", "prehash", "=", "upid", "+", "\"_\"", "+", "strSeed", "return", "md5", "(", "prehash", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")" ]
[ "From", "http", ":", "//", "cdn37", ".", "atwikiimg", ".", "com", "/", "sitescript", "/", "pub", "/", "dksitescript", "/", "FC2", ".", "site", ".", "js", "Also", "com", ".", "hps", ".", "util", ".", "fc2", ".", "FC2EncrptUtil", ".", "makeMimiLocal", "L110" ]
soimort/you-get
From http://cdn37.atwikiimg.com/sitescript/pub/dksitescript/FC2.site.js Also com.hps.util.fc2.FC2EncrptUtil.makeMimiLocal L110
makeMimi
src/you_get/extractors/fc2video.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/fc2video.py#L11-L17
b746ac01c9f39de94cac2d56f665285b0523b974
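The "mimi" is just an MD5 hex digest of the upid joined to a fixed seed, so it is reproducible with the standard library alone (the example upid is taken from the commented FC2 URLs in the next record):

from hashlib import md5

upid = '20151021bTVKnbEw'  # example upid from the commented FC2 URLs
mimi = md5(('%s_%s' % (upid, 'gGddgPfeaf_gzyr')).encode('utf-8')).hexdigest()
assert len(mimi) == 32  # 32-char lowercase hex digest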
def fc2video_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """wrapper"""
    #'http://video.fc2.com/en/content/20151021bTVKnbEw'
    #'http://xiaojiadianvideo.asia/content/20151021bTVKnbEw'
    #'http://video.fc2.com/ja/content/20151021bTVKnbEw'
    #'http://video.fc2.com/tw/content/20151021bTVKnbEw'
    hostname = urlparse(url).hostname
    if not ('fc2.com' in hostname or 'xiaojiadianvideo.asia' in hostname):
        return False
    upid = match1(url, r'.+/content/(\w+)')

    fc2video_download_by_upid(upid, output_dir, merge, info_only)
[ "def", "fc2video_download", "(", "url", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "#'http://video.fc2.com/en/content/20151021bTVKnbEw'", "#'http://xiaojiadianvideo.asia/content/20151021bTVKnbEw'", "#'http://video.fc2.com/ja/content/20151021bTVKnbEw'", "#'http://video.fc2.com/tw/content/20151021bTVKnbEw'", "hostname", "=", "urlparse", "(", "url", ")", ".", "hostname", "if", "not", "(", "'fc2.com'", "in", "hostname", "or", "'xiaojiadianvideo.asia'", "in", "hostname", ")", ":", "return", "False", "upid", "=", "match1", "(", "url", ",", "r'.+/content/(\\w+)'", ")", "fc2video_download_by_upid", "(", "upid", ",", "output_dir", ",", "merge", ",", "info_only", ")" ]
[ "wrapper" ]
soimort/you-get
wrapper
fc2video_download
src/you_get/extractors/fc2video.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/fc2video.py#L46-L57
b746ac01c9f39de94cac2d56f665285b0523b974
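The wrapper's two steps, hostname filtering and upid extraction, in isolation (pure standard library, using one of the commented example URLs):

import re
from urllib.parse import urlparse

url = 'http://video.fc2.com/en/content/20151021bTVKnbEw'
hostname = urlparse(url).hostname
assert 'fc2.com' in hostname
upid = re.search(r'.+/content/(\w+)', url).group(1)
assert upid == '20151021bTVKnbEw'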
def dailymotion_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Downloads Dailymotion videos by URL.
    """
    html = get_content(rebuilt_url(url))
    info = json.loads(match1(html, r'qualities":({.+?}),"'))
    title = match1(html, r'"video_title"\s*:\s*"([^"]+)"') or \
            match1(html, r'"title"\s*:\s*"([^"]+)"')
    title = unicodize(title)

    for quality in ['1080', '720', '480', '380', '240', '144', 'auto']:
        try:
            real_url = info[quality][1]["url"]
            if real_url:
                break
        except KeyError:
            pass

    mime, ext, size = url_info(real_url)

    print_info(site_info, title, mime, size)
    if not info_only:
        download_urls([real_url], title, ext, size, output_dir=output_dir, merge=merge)
[ "def", "dailymotion_download", "(", "url", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "html", "=", "get_content", "(", "rebuilt_url", "(", "url", ")", ")", "info", "=", "json", ".", "loads", "(", "match1", "(", "html", ",", "r'qualities\":({.+?}),\"'", ")", ")", "title", "=", "match1", "(", "html", ",", "r'\"video_title\"\\s*:\\s*\"([^\"]+)\"'", ")", "or", "match1", "(", "html", ",", "r'\"title\"\\s*:\\s*\"([^\"]+)\"'", ")", "title", "=", "unicodize", "(", "title", ")", "for", "quality", "in", "[", "'1080'", ",", "'720'", ",", "'480'", ",", "'380'", ",", "'240'", ",", "'144'", ",", "'auto'", "]", ":", "try", ":", "real_url", "=", "info", "[", "quality", "]", "[", "1", "]", "[", "\"url\"", "]", "if", "real_url", ":", "break", "except", "KeyError", ":", "pass", "mime", ",", "ext", ",", "size", "=", "url_info", "(", "real_url", ")", "print_info", "(", "site_info", ",", "title", ",", "mime", ",", "size", ")", "if", "not", "info_only", ":", "download_urls", "(", "[", "real_url", "]", ",", "title", ",", "ext", ",", "size", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ")" ]
[ "Downloads", "Dailymotion", "videos", "by", "URL", "." ]
soimort/you-get
Downloads Dailymotion videos by URL.
dailymotion_download
src/you_get/extractors/dailymotion.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/dailymotion.py#L13-L35
b746ac01c9f39de94cac2d56f665285b0523b974
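The quality fallback walks a fixed preference list over the page's qualities map; the shape it expects is a list per quality whose second element carries the URL. A toy run on a fabricated payload:

info = {'720': [{}, {'url': 'http://example.com/720.mp4'}]}  # fabricated payload
real_url = None
for quality in ['1080', '720', '480', '380', '240', '144', 'auto']:
    try:
        real_url = info[quality][1]['url']
        if real_url:
            break
    except KeyError:
        pass
assert real_url == 'http://example.com/720.mp4'

Note that if none of the listed qualities is present, real_url is never bound in the original function and the subsequent url_info call raises NameError.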
def dictify(r, root=True):
    """http://stackoverflow.com/a/30923963/2946714"""
    if root:
        return {r.tag: dictify(r, False)}
    d = copy(r.attrib)
    if r.text:
        d["_text"] = r.text
    for x in r.findall("./*"):
        if x.tag not in d:
            d[x.tag] = []
        d[x.tag].append(dictify(x, False))
    return d
[ "def", "dictify", "(", "r", ",", "root", "=", "True", ")", ":", "if", "root", ":", "return", "{", "r", ".", "tag", ":", "dictify", "(", "r", ",", "False", ")", "}", "d", "=", "copy", "(", "r", ".", "attrib", ")", "if", "r", ".", "text", ":", "d", "[", "\"_text\"", "]", "=", "r", ".", "text", "for", "x", "in", "r", ".", "findall", "(", "\"./*\"", ")", ":", "if", "x", ".", "tag", "not", "in", "d", ":", "d", "[", "x", ".", "tag", "]", "=", "[", "]", "d", "[", "x", ".", "tag", "]", ".", "append", "(", "dictify", "(", "x", ",", "False", ")", ")", "return", "d" ]
[ "http", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "30923963", "/", "2946714" ]
soimort/you-get
http://stackoverflow.com/a/30923963/2946714
dictify
src/you_get/extractors/ucas.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/ucas.py#L18-L29
b746ac01c9f39de94cac2d56f665285b0523b974
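Run end to end, dictify turns an ElementTree node into nested dicts, with child tags as list-valued keys and text under '_text'. A self-contained rerun of the same function on a tiny document:

from copy import copy
import xml.etree.ElementTree as ET

def dictify(r, root=True):
    if root:
        return {r.tag: dictify(r, False)}
    d = copy(r.attrib)
    if r.text:
        d['_text'] = r.text
    for x in r.findall('./*'):
        if x.tag not in d:
            d[x.tag] = []
        d[x.tag].append(dictify(x, False))
    return d

e = ET.XML('<ckplayer><info><title>demo</title></info></ckplayer>')
assert dictify(e) == {'ckplayer': {'info': [{'title': [{'_text': 'demo'}]}]}}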
def ucas_download_single(url, output_dir='.', merge=False, info_only=False, **kwargs):
    '''video page'''
    html = get_content(url)
    # resourceID is UUID
    resourceID = re.findall(
        r'resourceID":"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})',
        html)[0]
    assert resourceID != '', 'Cannot find resourceID!'

    title = match1(html, r'<div class="bc-h">(.+)</div>')
    url_lists = _ucas_get_url_lists_by_resourceID(resourceID)
    assert url_lists, 'Cannot find any URL of such class!'

    for k, part in enumerate(url_lists):
        part_title = title + '_' + str(k)
        print_info(site_info, part_title, 'flv', 0)
        if not info_only:
            download_urls(part, part_title, 'flv', total_size=None,
                          output_dir=output_dir, merge=merge)
[ "def", "ucas_download_single", "(", "url", ",", "output_dir", "=", "'.'", ",", "merge", "=", "False", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "html", "=", "get_content", "(", "url", ")", "# resourceID is UUID", "resourceID", "=", "re", ".", "findall", "(", "r'resourceID\":\"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})'", ",", "html", ")", "[", "0", "]", "assert", "resourceID", "!=", "''", ",", "'Cannot find resourceID!'", "title", "=", "match1", "(", "html", ",", "r'<div class=\"bc-h\">(.+)</div>'", ")", "url_lists", "=", "_ucas_get_url_lists_by_resourceID", "(", "resourceID", ")", "assert", "url_lists", ",", "'Cannot find any URL of such class!'", "for", "k", ",", "part", "in", "enumerate", "(", "url_lists", ")", ":", "part_title", "=", "title", "+", "'_'", "+", "str", "(", "k", ")", "print_info", "(", "site_info", ",", "part_title", ",", "'flv'", ",", "0", ")", "if", "not", "info_only", ":", "download_urls", "(", "part", ",", "part_title", ",", "'flv'", ",", "total_size", "=", "None", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ")" ]
[ "video", "page" ]
soimort/you-get
video page
ucas_download_single
src/you_get/extractors/ucas.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/ucas.py#L102-L117
b746ac01c9f39de94cac2d56f665285b0523b974
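The resourceID regex pins a canonical lowercase UUID; checked against a fabricated page fragment:

import re

html = 'resourceID":"01234567-89ab-cdef-0123-456789abcdef"'  # fabricated fragment
resourceID = re.findall(
    r'resourceID":"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})',
    html)[0]
assert resourceID == '01234567-89ab-cdef-0123-456789abcdef'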
def ucas_download_playlist(url, output_dir='.', merge=False, info_only=False, **kwargs):
    '''course page'''
    html = get_content(url)

    parts = re.findall(r'(getplaytitle.do\?.+)"', html)
    assert parts, 'No part found!'

    for part_path in parts:
        ucas_download('http://v.ucas.ac.cn/course/' + part_path,
                      output_dir=output_dir, merge=merge, info_only=info_only)
[ "def", "ucas_download_playlist", "(", "url", ",", "output_dir", "=", "'.'", ",", "merge", "=", "False", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "html", "=", "get_content", "(", "url", ")", "parts", "=", "re", ".", "findall", "(", "r'(getplaytitle.do\\?.+)\"'", ",", "html", ")", "assert", "parts", ",", "'No part found!'", "for", "part_path", "in", "parts", ":", "ucas_download", "(", "'http://v.ucas.ac.cn/course/'", "+", "part_path", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ")" ]
[ "course", "page" ]
soimort/you-get
course page
ucas_download_playlist
src/you_get/extractors/ucas.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/ucas.py#L119-L127
b746ac01c9f39de94cac2d56f665285b0523b974
def sina_download_by_vid(vid, title=None, output_dir='.', merge=True, info_only=False):
    """Downloads a Sina video by its unique vid.
    http://video.sina.com.cn/
    """
    xml = api_req(vid)
    urls, name, size = video_info(xml)
    if urls is None:
        log.wtf(name)
    title = name
    print_info(site_info, title, 'flv', size)
    if not info_only:
        download_urls(urls, title, 'flv', size, output_dir=output_dir, merge=merge)
[ "def", "sina_download_by_vid", "(", "vid", ",", "title", "=", "None", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ")", ":", "xml", "=", "api_req", "(", "vid", ")", "urls", ",", "name", ",", "size", "=", "video_info", "(", "xml", ")", "if", "urls", "is", "None", ":", "log", ".", "wtf", "(", "name", ")", "title", "=", "name", "print_info", "(", "site_info", ",", "title", ",", "'flv'", ",", "size", ")", "if", "not", "info_only", ":", "download_urls", "(", "urls", ",", "title", ",", "'flv'", ",", "size", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ")" ]
[ "Downloads", "a", "Sina", "video", "by", "its", "unique", "vid", ".", "http", ":", "//", "video", ".", "sina", ".", "com", ".", "cn", "/" ]
soimort/you-get
Downloads a Sina video by its unique vid. http://video.sina.com.cn/
sina_download_by_vid
src/you_get/extractors/sina.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/sina.py#L41-L52
b746ac01c9f39de94cac2d56f665285b0523b974
def sina_download_by_vkey(vkey, title=None, output_dir='.', merge=True, info_only=False):
    """Downloads a Sina video by its unique vkey.
    http://video.sina.com/
    """
    url = 'http://video.sina.com/v/flvideo/%s_0.flv' % vkey
    type, ext, size = url_info(url)
    print_info(site_info, title, 'flv', size)
    if not info_only:
        download_urls([url], title, 'flv', size, output_dir=output_dir, merge=merge)
[ "def", "sina_download_by_vkey", "(", "vkey", ",", "title", "=", "None", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ")", ":", "url", "=", "'http://video.sina.com/v/flvideo/%s_0.flv'", "%", "vkey", "type", ",", "ext", ",", "size", "=", "url_info", "(", "url", ")", "print_info", "(", "site_info", ",", "title", ",", "'flv'", ",", "size", ")", "if", "not", "info_only", ":", "download_urls", "(", "[", "url", "]", ",", "title", ",", "'flv'", ",", "size", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ")" ]
[ "Downloads", "a", "Sina", "video", "by", "its", "unique", "vkey", ".", "http", ":", "//", "video", ".", "sina", ".", "com", "/" ]
soimort/you-get
Downloads a Sina video by its unique vkey. http://video.sina.com/
sina_download_by_vkey
src/you_get/extractors/sina.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/sina.py#L54-L64
b746ac01c9f39de94cac2d56f665285b0523b974
def sina_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Downloads Sina videos by URL.
    """
    if 'news.sina.com.cn/zxt' in url:
        sina_zxt(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
        return

    vid = match1(url, r'vid=(\d+)')
    if vid is None:
        video_page = get_content(url)
        vid = hd_vid = match1(video_page, r'hd_vid\s*:\s*\'([^\']+)\'')
        if hd_vid == '0':
            vids = match1(video_page, r'[^\w]vid\s*:\s*\'([^\']+)\'').split('|')
            vid = vids[-1]

    if vid is None:
        vid = match1(video_page, r'vid:"?(\d+)"?')

    if vid:
        #title = match1(video_page, r'title\s*:\s*\'([^\']+)\'')
        sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
    else:
        vkey = match1(video_page, r'vkey\s*:\s*"([^"]+)"')
        if vkey is None:
            vid = match1(url, r'#(\d+)')
            sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
            return
        title = match1(video_page, r'title\s*:\s*"([^"]+)"')
        sina_download_by_vkey(vkey, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
[ "def", "sina_download", "(", "url", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "'news.sina.com.cn/zxt'", "in", "url", ":", "sina_zxt", "(", "url", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ",", "*", "*", "kwargs", ")", "return", "vid", "=", "match1", "(", "url", ",", "r'vid=(\\d+)'", ")", "if", "vid", "is", "None", ":", "video_page", "=", "get_content", "(", "url", ")", "vid", "=", "hd_vid", "=", "match1", "(", "video_page", ",", "r'hd_vid\\s*:\\s*\\'([^\\']+)\\''", ")", "if", "hd_vid", "==", "'0'", ":", "vids", "=", "match1", "(", "video_page", ",", "r'[^\\w]vid\\s*:\\s*\\'([^\\']+)\\''", ")", ".", "split", "(", "'|'", ")", "vid", "=", "vids", "[", "-", "1", "]", "if", "vid", "is", "None", ":", "vid", "=", "match1", "(", "video_page", ",", "r'vid:\"?(\\d+)\"?'", ")", "if", "vid", ":", "#title = match1(video_page, r'title\\s*:\\s*\\'([^\\']+)\\'')", "sina_download_by_vid", "(", "vid", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ")", "else", ":", "vkey", "=", "match1", "(", "video_page", ",", "r'vkey\\s*:\\s*\"([^\"]+)\"'", ")", "if", "vkey", "is", "None", ":", "vid", "=", "match1", "(", "url", ",", "r'#(\\d+)'", ")", "sina_download_by_vid", "(", "vid", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ")", "return", "title", "=", "match1", "(", "video_page", ",", "r'title\\s*:\\s*\"([^\"]+)\"'", ")", "sina_download_by_vkey", "(", "vkey", ",", "title", "=", "title", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ")" ]
[ "Downloads", "Sina", "videos", "by", "URL", "." ]
soimort/you-get
Downloads Sina videos by URL.
sina_download
src/you_get/extractors/sina.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/sina.py#L94-L121
b746ac01c9f39de94cac2d56f665285b0523b974
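The hd_vid/vid dance can be exercised against a fabricated page fragment: when hd_vid is '0', the pipe-separated vid list is split and the last entry wins:

import re

video_page = "hd_vid : '0', vid : '123|456'"  # fabricated page fragment
vid = hd_vid = re.search(r"hd_vid\s*:\s*'([^']+)'", video_page).group(1)
if hd_vid == '0':
    vid = re.search(r"[^\w]vid\s*:\s*'([^']+)'", video_page).group(1).split('|')[-1]
assert vid == '456'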
def yixia_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """wrapper"""
    hostname = urlparse(url).hostname
    if 'n.miaopai.com' == hostname:
        smid = match1(url, r'n\.miaopai\.com/media/([^.]+)')
        miaopai_download_by_smid(smid, output_dir, merge, info_only)
        return
    elif 'miaopai.com' in hostname:  #Miaopai
        yixia_download_by_scid = yixia_miaopai_download_by_scid
        site_info = "Yixia Miaopai"

        scid = match1(url, r'miaopai\.com/show/channel/([^.]+)\.htm') or \
               match1(url, r'miaopai\.com/show/([^.]+)\.htm') or \
               match1(url, r'm\.miaopai\.com/show/channel/([^.]+)\.htm') or \
               match1(url, r'm\.miaopai\.com/show/channel/([^.]+)')

    elif 'xiaokaxiu.com' in hostname:  #Xiaokaxiu
        yixia_download_by_scid = yixia_xiaokaxiu_download_by_scid
        site_info = "Yixia Xiaokaxiu"

        if re.match(r'http://v.xiaokaxiu.com/v/.+\.html', url):  #PC
            scid = match1(url, r'http://v.xiaokaxiu.com/v/(.+)\.html')
        elif re.match(r'http://m.xiaokaxiu.com/m/.+\.html', url):  #Mobile
            scid = match1(url, r'http://m.xiaokaxiu.com/m/(.+)\.html')
    else:
        pass

    yixia_download_by_scid(scid, output_dir, merge, info_only)
[ "def", "yixia_download", "(", "url", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "hostname", "=", "urlparse", "(", "url", ")", ".", "hostname", "if", "'n.miaopai.com'", "==", "hostname", ":", "smid", "=", "match1", "(", "url", ",", "r'n\\.miaopai\\.com/media/([^.]+)'", ")", "miaopai_download_by_smid", "(", "smid", ",", "output_dir", ",", "merge", ",", "info_only", ")", "return", "elif", "'miaopai.com'", "in", "hostname", ":", "#Miaopai", "yixia_download_by_scid", "=", "yixia_miaopai_download_by_scid", "site_info", "=", "\"Yixia Miaopai\"", "scid", "=", "match1", "(", "url", ",", "r'miaopai\\.com/show/channel/([^.]+)\\.htm'", ")", "or", "match1", "(", "url", ",", "r'miaopai\\.com/show/([^.]+)\\.htm'", ")", "or", "match1", "(", "url", ",", "r'm\\.miaopai\\.com/show/channel/([^.]+)\\.htm'", ")", "or", "match1", "(", "url", ",", "r'm\\.miaopai\\.com/show/channel/([^.]+)'", ")", "elif", "'xiaokaxiu.com'", "in", "hostname", ":", "#Xiaokaxiu", "yixia_download_by_scid", "=", "yixia_xiaokaxiu_download_by_scid", "site_info", "=", "\"Yixia Xiaokaxiu\"", "if", "re", ".", "match", "(", "r'http://v.xiaokaxiu.com/v/.+\\.html'", ",", "url", ")", ":", "#PC", "scid", "=", "match1", "(", "url", ",", "r'http://v.xiaokaxiu.com/v/(.+)\\.html'", ")", "elif", "re", ".", "match", "(", "r'http://m.xiaokaxiu.com/m/.+\\.html'", ",", "url", ")", ":", "#Mobile", "scid", "=", "match1", "(", "url", ",", "r'http://m.xiaokaxiu.com/m/(.+)\\.html'", ")", "else", ":", "pass", "yixia_download_by_scid", "(", "scid", ",", "output_dir", ",", "merge", ",", "info_only", ")" ]
[ "wrapper" ]
soimort/you-get
wrapper
yixia_download
src/you_get/extractors/yixia.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/yixia.py#L65-L93
b746ac01c9f39de94cac2d56f665285b0523b974
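One subtlety worth noting: assuming match1 scans with re.search (as the stub earlier does), the first Miaopai alternative already matches the m. mobile host as a substring, so it effectively covers the last two patterns as well:

import re

assert re.search(r'miaopai\.com/show/channel/([^.]+)\.htm',
                 'http://m.miaopai.com/show/channel/abc.htm').group(1) == 'abc'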
def veoh_download(url, output_dir='.', merge=False, info_only=False, **kwargs):
    '''Get item_id'''
    if re.match(r'http://www.veoh.com/watch/\w+', url):
        item_id = match1(url, r'http://www.veoh.com/watch/(\w+)')
    elif re.match(r'http://www.veoh.com/m/watch.php\?v=\.*', url):
        item_id = match1(url, r'http://www.veoh.com/m/watch.php\?v=(\w+)')
    else:
        raise NotImplementedError('Cannot find item ID')
    # Pass the caller's output_dir/merge through rather than hard-coded defaults.
    veoh_download_by_id(item_id, output_dir=output_dir, merge=merge,
                        info_only=info_only, **kwargs)
[ "def", "veoh_download", "(", "url", ",", "output_dir", "=", "'.'", ",", "merge", "=", "False", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "re", ".", "match", "(", "r'http://www.veoh.com/watch/\\w+'", ",", "url", ")", ":", "item_id", "=", "match1", "(", "url", ",", "r'http://www.veoh.com/watch/(\\w+)'", ")", "elif", "re", ".", "match", "(", "r'http://www.veoh.com/m/watch.php\\?v=\\.*'", ",", "url", ")", ":", "item_id", "=", "match1", "(", "url", ",", "r'http://www.veoh.com/m/watch.php\\?v=(\\w+)'", ")", "else", ":", "raise", "NotImplementedError", "(", "'Cannot find item ID'", ")", "veoh_download_by_id", "(", "item_id", ",", "output_dir", "=", "'.'", ",", "merge", "=", "False", ",", "info_only", "=", "info_only", ",", "*", "*", "kwargs", ")" ]
[ "Get", "item_id" ]
soimort/you-get
Get item_id
veoh_download
src/you_get/extractors/veoh.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/veoh.py#L8-L16
b746ac01c9f39de94cac2d56f665285b0523b974
def veoh_download_by_id(item_id, output_dir='.', merge=False, info_only=False, **kwargs):
    """Source: Android mobile"""
    webpage_url = 'http://www.veoh.com/m/watch.php?v={item_id}&quality=1'.format(item_id=item_id)

    #grab download URL
    a = get_content(webpage_url, decoded=True)
    url = match1(a, r'<source src="(.*?)\"\W')

    #grab title
    title = match1(a, r'<meta property="og:title" content="([^"]*)"')

    type_, ext, size = url_info(url)
    print_info(site_info, title, type_, size)
    if not info_only:
        download_urls([url], title, ext, total_size=None, output_dir=output_dir, merge=merge)
[ "def", "veoh_download_by_id", "(", "item_id", ",", "output_dir", "=", "'.'", ",", "merge", "=", "False", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "webpage_url", "=", "'http://www.veoh.com/m/watch.php?v={item_id}&quality=1'", ".", "format", "(", "item_id", "=", "item_id", ")", "#grab download URL", "a", "=", "get_content", "(", "webpage_url", ",", "decoded", "=", "True", ")", "url", "=", "match1", "(", "a", ",", "r'<source src=\"(.*?)\\\"\\W'", ")", "#grab title", "title", "=", "match1", "(", "a", ",", "r'<meta property=\"og:title\" content=\"([^\"]*)\"'", ")", "type_", ",", "ext", ",", "size", "=", "url_info", "(", "url", ")", "print_info", "(", "site_info", ",", "title", ",", "type_", ",", "size", ")", "if", "not", "info_only", ":", "download_urls", "(", "[", "url", "]", ",", "title", ",", "ext", ",", "total_size", "=", "None", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ")" ]
[ "Source", ":", "Android", "mobile" ]
soimort/you-get
Source: Android mobile
veoh_download_by_id
src/you_get/extractors/veoh.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/veoh.py#L19-L33
b746ac01c9f39de94cac2d56f665285b0523b974
def download_by_id(self, vid='', title=None, output_dir='.', merge=True, info_only=False, **kwargs):
    """self, str->None

    Keyword arguments:
    self: self
    vid: The video ID for BokeCC cloud, something like
    FE3BB999594978049C33DC5901307461

    Calls the prepare() to download the video.

    If no title is provided, this method shall try to find a proper title
    with the information provided within the returned content of the API."""
    assert vid
    self.prepare(vid=vid, title=title, **kwargs)
    self.extract(**kwargs)
    self.download(output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
[ "def", "download_by_id", "(", "self", ",", "vid", "=", "''", ",", "title", "=", "None", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "assert", "vid", "self", ".", "prepare", "(", "vid", "=", "vid", ",", "title", "=", "title", ",", "*", "*", "kwargs", ")", "self", ".", "extract", "(", "*", "*", "kwargs", ")", "self", ".", "download", "(", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ",", "*", "*", "kwargs", ")" ]
[ "self", "str", "-", ">", "None", "Keyword", "arguments", ":", "self", ":", "self", "vid", ":", "The", "video", "ID", "for", "BokeCC", "cloud", "something", "like", "FE3BB999594978049C33DC5901307461", "Calls", "the", "prepare", "()", "to", "download", "the", "video", ".", "If", "no", "title", "is", "provided", "this", "method", "shall", "try", "to", "find", "a", "proper", "title", "with", "the", "information", "providin", "within", "the", "returned", "content", "of", "the", "API", "." ]
soimort/you-get
self, str->None Keyword arguments: self: self vid: The video ID for BokeCC cloud, something like FE3BB999594978049C33DC5901307461 Calls the prepare() to download the video. If no title is provided, this method shall try to find a proper title with the information provided within the returned content of the API.
BokeCC.download_by_id
src/you_get/extractors/bokecc.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/bokecc.py#L17-L39
b746ac01c9f39de94cac2d56f665285b0523b974
def get_vid_from_url(self, url):
    """Extracts video ID from live.qq.com.
    """
    hit = re.search(r'live.qq.com/(\d+)', url)
    if hit is not None:
        return hit.group(1)
    hit = re.search(r'live.qq.com/directory/match/(\d+)', url)
    if hit is not None:
        return self.get_room_id_from_url(hit.group(1))
    html = get_content(url)
    room_id = match1(html, r'room_id\":(\d+)')
    if room_id is None:
        log.wtf('Unknown page {}'.format(url))
    return room_id
[ "def", "get_vid_from_url", "(", "self", ",", "url", ")", ":", "hit", "=", "re", ".", "search", "(", "r'live.qq.com/(\\d+)'", ",", "url", ")", "if", "hit", "is", "not", "None", ":", "return", "hit", ".", "group", "(", "1", ")", "hit", "=", "re", ".", "search", "(", "r'live.qq.com/directory/match/(\\d+)'", ",", "url", ")", "if", "hit", "is", "not", "None", ":", "return", "self", ".", "get_room_id_from_url", "(", "hit", ".", "group", "(", "1", ")", ")", "html", "=", "get_content", "(", "url", ")", "room_id", "=", "match1", "(", "html", ",", "r'room_id\\\":(\\d+)'", ")", "if", "room_id", "is", "None", ":", "log", ".", "wtf", "(", "'Unknown page {}'", ".", "format", "(", "url", ")", ")", "return", "room_id" ]
[ "Extracts", "video", "ID", "from", "live", ".", "qq", ".", "com", "." ]
soimort/you-get
Extracts video ID from live.qq.com.
QiE.get_vid_from_url
src/you_get/extractors/qie.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/qie.py#L35-L48
b746ac01c9f39de94cac2d56f665285b0523b974
def sprint(text, *colors):
    """Format text with color or other effects into ANSI escaped string."""
    return "\33[{}m{content}\33[{}m".format(
        ";".join([str(color) for color in colors]),
        RESET, content=text) if IS_ANSI_TERMINAL and colors else text
[ "def", "sprint", "(", "text", ",", "*", "colors", ")", ":", "return", "\"\\33[{}m{content}\\33[{}m\"", ".", "format", "(", "\";\"", ".", "join", "(", "[", "str", "(", "color", ")", "for", "color", "in", "colors", "]", ")", ",", "RESET", ",", "content", "=", "text", ")", "if", "IS_ANSI_TERMINAL", "and", "colors", "else", "text" ]
[ "Format", "text", "with", "color", "or", "other", "effects", "into", "ANSI", "escaped", "string", "." ]
soimort/you-get
Format text with color or other effects into ANSI escaped string.
sprint
src/you_get/util/log.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/log.py#L60-L62
b746ac01c9f39de94cac2d56f665285b0523b974
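sprint emits a standard SGR escape sequence; the formatting can be reproduced standalone (0, 1 and 31 are the standard RESET, BOLD and RED SGR codes, standing in for the module's constants):

RESET, BOLD, RED = 0, 1, 31  # standard SGR codes
text, colors = 'error', (RED, BOLD)
s = "\33[{}m{content}\33[{}m".format(";".join(str(c) for c in colors), RESET, content=text)
assert s == '\33[31;1merror\33[0m'  # bold red on an ANSI terminal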
def print_log(text, *colors):
    """Print a log message to standard error."""
    sys.stderr.write(sprint("{}: {}".format(script_name, text), *colors) + "\n")
[ "def", "print_log", "(", "text", ",", "*", "colors", ")", ":", "sys", ".", "stderr", ".", "write", "(", "sprint", "(", "\"{}: {}\"", ".", "format", "(", "script_name", ",", "text", ")", ",", "*", "colors", ")", "+", "\"\\n\"", ")" ]
[ "Print", "a", "log", "message", "to", "standard", "error", "." ]
soimort/you-get
Print a log message to standard error.
print_log
src/you_get/util/log.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/log.py#L72-L74
b746ac01c9f39de94cac2d56f665285b0523b974
def e(message, exit_code=None):
    """Print an error log message."""
    print_log(message, YELLOW, BOLD)
    if exit_code is not None:
        sys.exit(exit_code)
[ "def", "e", "(", "message", ",", "exit_code", "=", "None", ")", ":", "print_log", "(", "message", ",", "YELLOW", ",", "BOLD", ")", "if", "exit_code", "is", "not", "None", ":", "sys", ".", "exit", "(", "exit_code", ")" ]
[ "Print", "an", "error", "log", "message", "." ]
soimort/you-get
Print an error log message.
e
src/you_get/util/log.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/log.py#L88-L92
b746ac01c9f39de94cac2d56f665285b0523b974
def wtf(message, exit_code=1):
    """What a Terrible Failure!"""
    print_log(message, RED, BOLD)
    if exit_code is not None:
        sys.exit(exit_code)
[ "def", "wtf", "(", "message", ",", "exit_code", "=", "1", ")", ":", "print_log", "(", "message", ",", "RED", ",", "BOLD", ")", "if", "exit_code", "is", "not", "None", ":", "sys", ".", "exit", "(", "exit_code", ")" ]
[ "What", "a", "Terrible", "Failure!" ]
soimort/you-get
What a Terrible Failure!
wtf
src/you_get/util/log.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/log.py#L94-L98
b746ac01c9f39de94cac2d56f665285b0523b974
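The WSL special case hinges on /proc/version mentioning "microsoft"; the probe in isolation (only meaningful on Linux, where that file exists):

def is_wsl():
    # True when running under Windows Subsystem for Linux.
    try:
        with open('/proc/version', 'r') as f:
            return 'microsoft' in f.read().lower()
    except OSError:
        return False

print(is_wsl())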
def detect_os():
    """Detect operating system.
    """
    # Inspired by:
    # https://github.com/scivision/pybashutils/blob/78b7f2b339cb03b1c37df94015098bbe462f8526/pybashutils/windows_linux_detect.py
    syst = system().lower()
    os = 'unknown'

    if 'cygwin' in syst:
        os = 'cygwin'
    elif 'darwin' in syst:
        os = 'mac'
    elif 'linux' in syst:
        os = 'linux'
        # detect WSL https://github.com/Microsoft/BashOnWindows/issues/423
        try:
            with open('/proc/version', 'r') as f:
                if 'microsoft' in f.read().lower():
                    os = 'wsl'
        except:
            pass
    elif 'windows' in syst:
        os = 'windows'
    elif 'bsd' in syst:
        os = 'bsd'

    return os
[ "def", "detect_os", "(", ")", ":", "# Inspired by:", "# https://github.com/scivision/pybashutils/blob/78b7f2b339cb03b1c37df94015098bbe462f8526/pybashutils/windows_linux_detect.py", "syst", "=", "system", "(", ")", ".", "lower", "(", ")", "os", "=", "'unknown'", "if", "'cygwin'", "in", "syst", ":", "os", "=", "'cygwin'", "elif", "'darwin'", "in", "syst", ":", "os", "=", "'mac'", "elif", "'linux'", "in", "syst", ":", "os", "=", "'linux'", "# detect WSL https://github.com/Microsoft/BashOnWindows/issues/423", "try", ":", "with", "open", "(", "'/proc/version'", ",", "'r'", ")", "as", "f", ":", "if", "'microsoft'", "in", "f", ".", "read", "(", ")", ".", "lower", "(", ")", ":", "os", "=", "'wsl'", "except", ":", "pass", "elif", "'windows'", "in", "syst", ":", "os", "=", "'windows'", "elif", "'bsd'", "in", "syst", ":", "os", "=", "'bsd'", "return", "os" ]
[ "Detect", "operating", "system", "." ]
soimort/you-get
Detect operating system.
detect_os
src/you_get/util/os.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/os.py#L5-L32
b746ac01c9f39de94cac2d56f665285b0523b974
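The <title> regex in the next record deliberately spans newlines via (.|\n)+?, which the later replace('\n', '_') flattens; a check on a fabricated page:

import re

mobile_page = '<title>demo\nvideo</title>'  # fabricated page
title = re.search(r'<title>((.|\n)+?)</title>', mobile_page).group(1)
assert title.replace('\n', '_') == 'demo_video'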
def miaopai_download_by_fid(fid, output_dir='.', merge=False, info_only=False, **kwargs):
    '''Source: Android mobile'''
    page_url = 'http://video.weibo.com/show?fid=' + fid + '&type=mp4'

    mobile_page = get_content(page_url, headers=fake_headers_mobile)
    url = match1(mobile_page, r'<video id=.*?src=[\'"](.*?)[\'"]\W')
    if url is None:
        wb_mp = re.search(r'<script src=([\'"])(.+?wb_mp\.js)\1>', mobile_page).group(2)
        return miaopai_download_by_wbmp(wb_mp, fid, output_dir=output_dir, merge=merge,
                                        info_only=info_only, total_size=None, **kwargs)

    title = match1(mobile_page, r'<title>((.|\n)+?)</title>')
    if not title:
        title = fid
    title = title.replace('\n', '_')
    ext, size = 'mp4', url_info(url)[2]
    print_info(site_info, title, ext, size)
    if not info_only:
        download_urls([url], title, ext, total_size=None, output_dir=output_dir, merge=merge)
[ "def", "miaopai_download_by_fid", "(", "fid", ",", "output_dir", "=", "'.'", ",", "merge", "=", "False", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "page_url", "=", "'http://video.weibo.com/show?fid='", "+", "fid", "+", "'&type=mp4'", "mobile_page", "=", "get_content", "(", "page_url", ",", "headers", "=", "fake_headers_mobile", ")", "url", "=", "match1", "(", "mobile_page", ",", "r'<video id=.*?src=[\\'\"](.*?)[\\'\"]\\W'", ")", "if", "url", "is", "None", ":", "wb_mp", "=", "re", ".", "search", "(", "r'<script src=([\\'\"])(.+?wb_mp\\.js)\\1>'", ",", "mobile_page", ")", ".", "group", "(", "2", ")", "return", "miaopai_download_by_wbmp", "(", "wb_mp", ",", "fid", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ",", "total_size", "=", "None", ",", "*", "*", "kwargs", ")", "title", "=", "match1", "(", "mobile_page", ",", "r'<title>((.|\\n)+?)</title>'", ")", "if", "not", "title", ":", "title", "=", "fid", "title", "=", "title", ".", "replace", "(", "'\\n'", ",", "'_'", ")", "ext", ",", "size", "=", "'mp4'", ",", "url_info", "(", "url", ")", "[", "2", "]", "print_info", "(", "site_info", ",", "title", ",", "ext", ",", "size", ")", "if", "not", "info_only", ":", "download_urls", "(", "[", "url", "]", ",", "title", ",", "ext", ",", "total_size", "=", "None", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ")" ]
[ "Source", ":", "Android", "mobile" ]
soimort/you-get
Source: Android mobile
miaopai_download_by_fid
src/you_get/extractors/miaopai.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/miaopai.py#L20-L37
b746ac01c9f39de94cac2d56f665285b0523b974
def vimeo_download_by_channel(url, output_dir='.', merge=False, info_only=False, **kwargs):
    """str->None"""
    # https://vimeo.com/channels/464686
    channel_id = match1(url, r'http://vimeo.com/channels/(\w+)')
    vimeo_download_by_channel_id(channel_id, output_dir, merge, info_only, **kwargs)
[ "def", "vimeo_download_by_channel", "(", "url", ",", "output_dir", "=", "'.'", ",", "merge", "=", "False", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# https://vimeo.com/channels/464686", "channel_id", "=", "match1", "(", "url", ",", "r'http://vimeo.com/channels/(\\w+)'", ")", "vimeo_download_by_channel_id", "(", "channel_id", ",", "output_dir", ",", "merge", ",", "info_only", ",", "*", "*", "kwargs", ")" ]
[ "str", "-", ">", "None" ]
soimort/you-get
str->None
vimeo_download_by_channel
src/you_get/extractors/vimeo.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/vimeo.py#L15-L19
b746ac01c9f39de94cac2d56f665285b0523b974
def vimeo_download_by_channel_id(channel_id, output_dir='.', merge=False, info_only=False, **kwargs):
    """str/int->None"""
    html = get_content('https://api.vimeo.com/channels/{channel_id}/videos?access_token={access_token}'.format(
        channel_id=channel_id, access_token=access_token))
    data = loads(html)
    id_list = []

    #print(data)
    for i in data['data']:
        id_list.append(match1(i['uri'], r'/videos/(\w+)'))

    for id in id_list:
        try:
            vimeo_download_by_id(id, None, output_dir, merge, info_only, **kwargs)
        except urllib.error.URLError as e:
            log.w('{} failed with {}'.format(id, e))
[ "def", "vimeo_download_by_channel_id", "(", "channel_id", ",", "output_dir", "=", "'.'", ",", "merge", "=", "False", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "html", "=", "get_content", "(", "'https://api.vimeo.com/channels/{channel_id}/videos?access_token={access_token}'", ".", "format", "(", "channel_id", "=", "channel_id", ",", "access_token", "=", "access_token", ")", ")", "data", "=", "loads", "(", "html", ")", "id_list", "=", "[", "]", "#print(data)", "for", "i", "in", "data", "[", "'data'", "]", ":", "id_list", ".", "append", "(", "match1", "(", "i", "[", "'uri'", "]", ",", "r'/videos/(\\w+)'", ")", ")", "for", "id", "in", "id_list", ":", "try", ":", "vimeo_download_by_id", "(", "id", ",", "None", ",", "output_dir", ",", "merge", ",", "info_only", ",", "*", "*", "kwargs", ")", "except", "urllib", ".", "error", ".", "URLError", "as", "e", ":", "log", ".", "w", "(", "'{} failed with {}'", ".", "format", "(", "id", ",", "e", ")", ")" ]
[ "str", "/", "int", "-", ">", "None" ]
soimort/you-get
str/int->None
vimeo_download_by_channel_id
src/you_get/extractors/vimeo.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/vimeo.py#L22-L36
b746ac01c9f39de94cac2d56f665285b0523b974
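The channel walk only needs the numeric tail of each uri in the API payload; with a payload of the shape the code expects (values fabricated):

import re

data = {'data': [{'uri': '/videos/123'}, {'uri': '/videos/456'}]}  # fabricated payload
id_list = [re.search(r'/videos/(\w+)', i['uri']).group(1) for i in data['data']]
assert id_list == ['123', '456']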
def vimeo_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False, **kwargs):
    '''
    try:
        # normal Vimeo video
        html = get_content('https://vimeo.com/' + id)
        cfg_patt = r'clip_page_config\s*=\s*(\{.+?\});'
        cfg = json.loads(match1(html, cfg_patt))
        video_page = get_content(cfg['player']['config_url'], headers=fake_headers)
        title = cfg['clip']['title']
        info = loads(video_page)
    except:
        # embedded player - referer may be required
        if 'referer' in kwargs:
            fake_headers['Referer'] = kwargs['referer']

        video_page = get_content('http://player.vimeo.com/video/%s' % id, headers=fake_headers)
        title = r1(r'<title>([^<]+)</title>', video_page)
        info = loads(match1(video_page, r'var t=(\{.+?\});'))

    streams = info['request']['files']['progressive']
    streams = sorted(streams, key=lambda i: i['height'])
    url = streams[-1]['url']

    type, ext, size = url_info(url, faker=True)
    print_info(site_info, title, type, size)
    if not info_only:
        download_urls([url], title, ext, size, output_dir, merge=merge, faker=True)
    '''
    site = VimeoExtractor()
    site.download_by_vid(id, info_only=info_only, output_dir=output_dir, merge=merge, **kwargs)
[ "def", "vimeo_download_by_id", "(", "id", ",", "title", "=", "None", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "site", "=", "VimeoExtractor", "(", ")", "site", ".", "download_by_vid", "(", "id", ",", "info_only", "=", "info_only", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "*", "*", "kwargs", ")" ]
[ "try", ":", "#", "normal", "Vimeo", "video", "html", "=", "get_content", "(", "https", ":", "//", "vimeo", ".", "com", "/", "+", "id", ")", "cfg_patt", "=", "r", "clip_page_config", "\\", "s", "*", "=", "\\", "s", "*", "(", "\\", "{", ".", "+", "?", "\\", "}", ")", ";", "cfg", "=", "json", ".", "loads", "(", "match1", "(", "html", "cfg_patt", "))", "video_page", "=", "get_content", "(", "cfg", "[", "player", "]", "[", "config_url", "]", "headers", "=", "fake_headers", ")", "title", "=", "cfg", "[", "clip", "]", "[", "title", "]", "info", "=", "loads", "(", "video_page", ")", "except", ":", "#", "embedded", "player", "-", "referer", "may", "be", "required", "if", "referer", "in", "kwargs", ":", "fake_headers", "[", "Referer", "]", "=", "kwargs", "[", "referer", "]" ]
soimort/you-get
vimeo_download_by_id
src/you_get/extractors/vimeo.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/vimeo.py#L134-L164
b746ac01c9f39de94cac2d56f665285b0523b974
def ckplayer_get_info_by_xml(ckinfo):
    """str->dict
    Information for CKPlayer API content."""
    e = ET.XML(ckinfo)
    video_dict = {'title': '',
                  #'duration': 0,
                  'links': [],
                  'size': 0,
                  'flashvars': '',}
    dictified = dictify(e)['ckplayer']
    if 'info' in dictified:
        if '_text' in dictified['info'][0]['title'][0]:  #title
            video_dict['title'] = dictified['info'][0]['title'][0]['_text'].strip()

    #if dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip():  #duration
        #video_dict['title'] = dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip()

    if '_text' in dictified['video'][0]['size'][0]:  #size exists for 1 piece
        video_dict['size'] = sum([int(i['size'][0]['_text']) for i in dictified['video']])

    if '_text' in dictified['video'][0]['file'][0]:  #link exist
        video_dict['links'] = [i['file'][0]['_text'].strip() for i in dictified['video']]

    if '_text' in dictified['flashvars'][0]:
        video_dict['flashvars'] = dictified['flashvars'][0]['_text'].strip()

    return video_dict
[ "def", "ckplayer_get_info_by_xml", "(", "ckinfo", ")", ":", "e", "=", "ET", ".", "XML", "(", "ckinfo", ")", "video_dict", "=", "{", "'title'", ":", "''", ",", "#'duration': 0,", "'links'", ":", "[", "]", ",", "'size'", ":", "0", ",", "'flashvars'", ":", "''", ",", "}", "dictified", "=", "dictify", "(", "e", ")", "[", "'ckplayer'", "]", "if", "'info'", "in", "dictified", ":", "if", "'_text'", "in", "dictified", "[", "'info'", "]", "[", "0", "]", "[", "'title'", "]", "[", "0", "]", ":", "#title", "video_dict", "[", "'title'", "]", "=", "dictified", "[", "'info'", "]", "[", "0", "]", "[", "'title'", "]", "[", "0", "]", "[", "'_text'", "]", ".", "strip", "(", ")", "#if dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip(): #duration", "#video_dict['title'] = dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip()", "if", "'_text'", "in", "dictified", "[", "'video'", "]", "[", "0", "]", "[", "'size'", "]", "[", "0", "]", ":", "#size exists for 1 piece", "video_dict", "[", "'size'", "]", "=", "sum", "(", "[", "int", "(", "i", "[", "'size'", "]", "[", "0", "]", "[", "'_text'", "]", ")", "for", "i", "in", "dictified", "[", "'video'", "]", "]", ")", "if", "'_text'", "in", "dictified", "[", "'video'", "]", "[", "0", "]", "[", "'file'", "]", "[", "0", "]", ":", "#link exist", "video_dict", "[", "'links'", "]", "=", "[", "i", "[", "'file'", "]", "[", "0", "]", "[", "'_text'", "]", ".", "strip", "(", ")", "for", "i", "in", "dictified", "[", "'video'", "]", "]", "if", "'_text'", "in", "dictified", "[", "'flashvars'", "]", "[", "0", "]", ":", "video_dict", "[", "'flashvars'", "]", "=", "dictified", "[", "'flashvars'", "]", "[", "0", "]", "[", "'_text'", "]", ".", "strip", "(", ")", "return", "video_dict" ]
[ "str", "-", ">", "dict", "Information", "for", "CKPlayer", "API", "content", "." ]
soimort/you-get
def ckplayer_get_info_by_xml(ckinfo): """str->dict Information for CKPlayer API content.""" e = ET.XML(ckinfo) video_dict = {'title': '', #'duration': 0, 'links': [], 'size': 0, 'flashvars': '',} dictified = dictify(e)['ckplayer'] if 'info' in dictified: if '_text' in dictified['info'][0]['title'][0]: #title video_dict['title'] = dictified['info'][0]['title'][0]['_text'].strip() #if dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip(): #duration #video_dict['title'] = dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip() if '_text' in dictified['video'][0]['size'][0]: #size exists for 1 piece video_dict['size'] = sum([int(i['size'][0]['_text']) for i in dictified['video']]) if '_text' in dictified['video'][0]['file'][0]: #link exist video_dict['links'] = [i['file'][0]['_text'].strip() for i in dictified['video']] if '_text' in dictified['flashvars'][0]: video_dict['flashvars'] = dictified['flashvars'][0]['_text'].strip() return video_dict
str->dict Information for CKPlayer API content.
ckplayer_get_info_by_xml
src/you_get/extractors/ckplayer.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/ckplayer.py#L13-L39
b746ac01c9f39de94cac2d56f665285b0523b974
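The parser above walks a dictified ElementTree. The XML shape it implies is inferred from its field accesses, not from any CKPlayer documentation; under that assumption, plain ElementTree extracts the same title, summed size, and link list:

import xml.etree.ElementTree as ET

sample = ('<ckplayer>'  # made-up document in the assumed shape
          '<info><title> Demo title </title></info>'
          '<video><size>100</size><file> http://example.invalid/a.mp4 </file></video>'
          '<video><size>200</size><file> http://example.invalid/b.mp4 </file></video>'
          '<flashvars> a=1 </flashvars>'
          '</ckplayer>')
root = ET.XML(sample)
title = root.findtext('./info/title', '').strip()
size = sum(int(v.findtext('size')) for v in root.findall('video'))   # sum over all pieces
links = [v.findtext('file').strip() for v in root.findall('video')]
print(title, size, links)  # Demo title 300 ['http://example.invalid/a.mp4', ...]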
def get_video_url_from_video_id(video_id): """Splicing URLs according to video ID to get video details""" # from js data = [""] * 256 for index, _ in enumerate(data): t = index for i in range(8): t = -306674912 ^ unsigned_right_shitf(t, 1) if 1 & t else unsigned_right_shitf(t, 1) data[index] = t def tmp(): rand_num = random.random() path = "/video/urls/v/1/toutiao/mp4/{video_id}?r={random_num}".format(video_id=video_id, random_num=str(rand_num)[2:]) e = o = r = -1 i, a = 0, len(path) while i < a: e = ord(path[i]) i += 1 if e < 128: r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ e)] else: if e < 2048: r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (192 | e >> 6 & 31))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))] else: if 55296 <= e < 57344: e = (1023 & e) + 64 i += 1 o = 1023 & ord(path[i]) r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (240 | e >> 8 & 7))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 2 & 63))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | o >> 6 & 15 | (3 & e) << 4))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & o))] else: r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (224 | e >> 12 & 15))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 6 & 63))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))] return "https://ib.365yg.com{path}&s={param}".format(path=path, param=unsigned_right_shitf(r ^ -1, 0)) while 1: url = tmp() if url.split("=")[-1][0] != "-": # the s parameter must not be negative return url
[ "def", "get_video_url_from_video_id", "(", "video_id", ")", ":", "# from js", "data", "=", "[", "\"\"", "]", "*", "256", "for", "index", ",", "_", "in", "enumerate", "(", "data", ")", ":", "t", "=", "index", "for", "i", "in", "range", "(", "8", ")", ":", "t", "=", "-", "306674912", "^", "unsigned_right_shitf", "(", "t", ",", "1", ")", "if", "1", "&", "t", "else", "unsigned_right_shitf", "(", "t", ",", "1", ")", "data", "[", "index", "]", "=", "t", "def", "tmp", "(", ")", ":", "rand_num", "=", "random", ".", "random", "(", ")", "path", "=", "\"/video/urls/v/1/toutiao/mp4/{video_id}?r={random_num}\"", ".", "format", "(", "video_id", "=", "video_id", ",", "random_num", "=", "str", "(", "rand_num", ")", "[", "2", ":", "]", ")", "e", "=", "o", "=", "r", "=", "-", "1", "i", ",", "a", "=", "0", ",", "len", "(", "path", ")", "while", "i", "<", "a", ":", "e", "=", "ord", "(", "path", "[", "i", "]", ")", "i", "+=", "1", "if", "e", "<", "128", ":", "r", "=", "unsigned_right_shitf", "(", "r", ",", "8", ")", "^", "data", "[", "255", "&", "(", "r", "^", "e", ")", "]", "else", ":", "if", "e", "<", "2048", ":", "r", "=", "unsigned_right_shitf", "(", "r", ",", "8", ")", "^", "data", "[", "255", "&", "(", "r", "^", "(", "192", "|", "e", ">>", "6", "&", "31", ")", ")", "]", "r", "=", "unsigned_right_shitf", "(", "r", ",", "8", ")", "^", "data", "[", "255", "&", "(", "r", "^", "(", "128", "|", "63", "&", "e", ")", ")", "]", "else", ":", "if", "55296", "<=", "e", "<", "57344", ":", "e", "=", "(", "1023", "&", "e", ")", "+", "64", "i", "+=", "1", "o", "=", "1023", "&", "t", ".", "url", "(", "i", ")", "r", "=", "unsigned_right_shitf", "(", "r", ",", "8", ")", "^", "data", "[", "255", "&", "(", "r", "^", "(", "240", "|", "e", ">>", "8", "&", "7", ")", ")", "]", "r", "=", "unsigned_right_shitf", "(", "r", ",", "8", ")", "^", "data", "[", "255", "&", "(", "r", "^", "(", "128", "|", "e", ">>", "2", "&", "63", ")", ")", "]", "r", "=", "unsigned_right_shitf", "(", "r", ",", "8", ")", "^", "data", "[", "255", "&", "(", "r", "^", "(", "128", "|", "o", ">>", "6", "&", "15", "|", "(", "3", "&", "e", ")", "<<", "4", ")", ")", "]", "r", "=", "unsigned_right_shitf", "(", "r", ",", "8", ")", "^", "data", "[", "255", "&", "(", "r", "^", "(", "128", "|", "63", "&", "o", ")", ")", "]", "else", ":", "r", "=", "unsigned_right_shitf", "(", "r", ",", "8", ")", "^", "data", "[", "255", "&", "(", "r", "^", "(", "224", "|", "e", ">>", "12", "&", "15", ")", ")", "]", "r", "=", "unsigned_right_shitf", "(", "r", ",", "8", ")", "^", "data", "[", "255", "&", "(", "r", "^", "(", "128", "|", "e", ">>", "6", "&", "63", ")", ")", "]", "r", "=", "unsigned_right_shitf", "(", "r", ",", "8", ")", "^", "data", "[", "255", "&", "(", "r", "^", "(", "128", "|", "63", "&", "e", ")", ")", "]", "return", "\"https://ib.365yg.com{path}&s={param}\"", ".", "format", "(", "path", "=", "path", ",", "param", "=", "unsigned_right_shitf", "(", "r", "^", "-", "1", ",", "0", ")", ")", "while", "1", ":", "url", "=", "tmp", "(", ")", "if", "url", ".", "split", "(", "\"=\"", ")", "[", "-", "1", "]", "[", "0", "]", "!=", "\"-\"", ":", "# 参数s不能为负数", "return", "url" ]
[ "Splicing", "URLs", "according", "to", "video", "ID", "to", "get", "video", "details" ]
soimort/you-get
def get_video_url_from_video_id(video_id): """Splicing URLs according to video ID to get video details""" # from js data = [""] * 256 for index, _ in enumerate(data): t = index for i in range(8): t = -306674912 ^ unsigned_right_shitf(t, 1) if 1 & t else unsigned_right_shitf(t, 1) data[index] = t def tmp(): rand_num = random.random() path = "/video/urls/v/1/toutiao/mp4/{video_id}?r={random_num}".format(video_id=video_id, random_num=str(rand_num)[2:]) e = o = r = -1 i, a = 0, len(path) while i < a: e = ord(path[i]) i += 1 if e < 128: r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ e)] else: if e < 2048: r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (192 | e >> 6 & 31))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))] else: if 55296 <= e < 57344: e = (1023 & e) + 64 i += 1 o = 1023 & ord(path[i]) r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (240 | e >> 8 & 7))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 2 & 63))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | o >> 6 & 15 | (3 & e) << 4))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & o))] else: r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (224 | e >> 12 & 15))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 6 & 63))] r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))] return "https://ib.365yg.com{path}&s={param}".format(path=path, param=unsigned_right_shitf(r ^ -1, 0)) while 1: url = tmp() if url.split("=")[-1][0] != "-": # the s parameter must not be negative return url
Splicing URLs according to video ID to get video details
get_video_url_from_video_id
src/you_get/extractors/ixigua.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/ixigua.py#L34-L78
b746ac01c9f39de94cac2d56f665285b0523b974
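The two loops above read as a line-for-line JavaScript port of CRC-32: -306674912 is the signed 32-bit form of the reflected polynomial 0xEDB88320, the register starts at -1 and is inverted at the end, and the branches UTF-8-encode each character before feeding it in. If that reading is right, the s parameter is simply the CRC-32 of the path, and the retry loop becomes redundant because the masked value is never negative. A hedged sketch:

import binascii, random

def sign_path(path):
    # Standard CRC-32 over the UTF-8 bytes; the mask keeps the value
    # non-negative, mirroring unsigned_right_shitf(r ^ -1, 0) above.
    # The equivalence is an assumption inferred from the ported loop,
    # not taken from any documentation of the site.
    return binascii.crc32(path.encode('utf-8')) & 0xffffffff

video_id = '123456'  # hypothetical id
path = '/video/urls/v/1/toutiao/mp4/{}?r={}'.format(video_id, str(random.random())[2:])
print('https://ib.365yg.com{}&s={}'.format(path, sign_path(path)))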
def get_vid_from_url(url): """Extracts video ID from URL. """ vid = match1(url, r'https?://www.mgtv.com/(?:b|l)/\d+/(\d+).html') if not vid: vid = match1(url, r'https?://www.mgtv.com/hz/bdpz/\d+/(\d+).html') return vid
[ "def", "get_vid_from_url", "(", "url", ")", ":", "vid", "=", "match1", "(", "url", ",", "'https?://www.mgtv.com/(?:b|l)/\\d+/(\\d+).html'", ")", "if", "not", "vid", ":", "vid", "=", "match1", "(", "url", ",", "'https?://www.mgtv.com/hz/bdpz/\\d+/(\\d+).html'", ")", "return", "vid" ]
[ "Extracts", "video", "ID", "from", "URL", "." ]
soimort/you-get
def get_vid_from_url(url): """Extracts video ID from URL. """ vid = match1(url, r'https?://www.mgtv.com/(?:b|l)/\d+/(\d+).html') if not vid: vid = match1(url, r'https?://www.mgtv.com/hz/bdpz/\d+/(\d+).html') return vid
Extracts video ID from URL.
MGTV.get_vid_from_url
src/you_get/extractors/mgtv.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/mgtv.py#L27-L33
b746ac01c9f39de94cac2d56f665285b0523b974
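A quick check of the two patterns above against made-up URLs (both ids are hypothetical); match1 is inlined here so the snippet runs standalone:

import re

def match1(text, pattern):  # minimal stand-in for the helper used above
    m = re.search(pattern, text)
    return m.group(1) if m else None

for u in ('https://www.mgtv.com/b/301218/3606744.html',
          'https://www.mgtv.com/hz/bdpz/1329/3606744.html'):
    vid = (match1(u, r'https?://www.mgtv.com/(?:b|l)/\d+/(\d+).html')
           or match1(u, r'https?://www.mgtv.com/hz/bdpz/\d+/(\d+).html'))
    print(vid)  # 3606744 both times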
def get_mgtv_real_url(url): """str->list of str Gives you the real URLs.""" content = loads(get_content(url)) m3u_url = content['info'] split = urlsplit(m3u_url) base_url = "{scheme}://{netloc}{path}/".format(scheme = split[0], netloc = split[1], path = dirname(split[2])) content = get_content(content['info']) #get the REAL M3U url, maybe to be changed later? segment_list = [] segments_size = 0 for i in content.split(): if not i.startswith('#'): #not the best way; better to use the m3u8 package segment_list.append(base_url + i) # use ext-info for fast size calculation elif i.startswith('#EXT-MGTV-File-SIZE:'): segments_size += int(i[i.rfind(':')+1:]) return m3u_url, segments_size, segment_list
[ "def", "get_mgtv_real_url", "(", "url", ")", ":", "content", "=", "loads", "(", "get_content", "(", "url", ")", ")", "m3u_url", "=", "content", "[", "'info'", "]", "split", "=", "urlsplit", "(", "m3u_url", ")", "base_url", "=", "\"{scheme}://{netloc}{path}/\"", ".", "format", "(", "scheme", "=", "split", "[", "0", "]", ",", "netloc", "=", "split", "[", "1", "]", ",", "path", "=", "dirname", "(", "split", "[", "2", "]", ")", ")", "content", "=", "get_content", "(", "content", "[", "'info'", "]", ")", "#get the REAL M3U url, maybe to be changed later?", "segment_list", "=", "[", "]", "segments_size", "=", "0", "for", "i", "in", "content", ".", "split", "(", ")", ":", "if", "not", "i", ".", "startswith", "(", "'#'", ")", ":", "#not the best way, better we use the m3u8 package", "segment_list", ".", "append", "(", "base_url", "+", "i", ")", "# use ext-info for fast size calculate", "elif", "i", ".", "startswith", "(", "'#EXT-MGTV-File-SIZE:'", ")", ":", "segments_size", "+=", "int", "(", "i", "[", "i", ".", "rfind", "(", "':'", ")", "+", "1", ":", "]", ")", "return", "m3u_url", ",", "segments_size", ",", "segment_list" ]
[ "str", "-", ">", "list", "of", "str", "Give", "you", "the", "real", "URLs", "." ]
soimort/you-get
def get_mgtv_real_url(url): """str->list of str Gives you the real URLs.""" content = loads(get_content(url)) m3u_url = content['info'] split = urlsplit(m3u_url) base_url = "{scheme}://{netloc}{path}/".format(scheme = split[0], netloc = split[1], path = dirname(split[2])) content = get_content(content['info']) #get the REAL M3U url, maybe to be changed later? segment_list = [] segments_size = 0 for i in content.split(): if not i.startswith('#'): #not the best way; better to use the m3u8 package segment_list.append(base_url + i) # use ext-info for fast size calculation elif i.startswith('#EXT-MGTV-File-SIZE:'): segments_size += int(i[i.rfind(':')+1:]) return m3u_url, segments_size, segment_list
str->list of str Give you the real URLs.
MGTV.get_mgtv_real_url
src/you_get/extractors/mgtv.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/mgtv.py#L37-L58
b746ac01c9f39de94cac2d56f665285b0523b974
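The size trick above sums the #EXT-MGTV-File-SIZE tags instead of issuing a request per segment. A self-contained sketch on a made-up playlist (posixpath.dirname is used here so the URL path splits the same way on any OS):

from urllib.parse import urlsplit
from posixpath import dirname

m3u_url = 'http://example.invalid/pull/video/index.m3u8'  # made-up URL
playlist = """#EXTM3U
#EXT-MGTV-File-SIZE:1048576
seg0.ts
#EXT-MGTV-File-SIZE:2097152
seg1.ts
"""
split = urlsplit(m3u_url)
base = '{}://{}{}/'.format(split.scheme, split.netloc, dirname(split.path))
segments = [base + line for line in playlist.split() if not line.startswith('#')]
size = sum(int(line.rsplit(':', 1)[1]) for line in playlist.split()
           if line.startswith('#EXT-MGTV-File-SIZE:'))
print(size, segments)  # 3145728 ['http://example.invalid/pull/video/seg0.ts', ...]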
def get_head(repo_path): """Get (branch, commit) from HEAD of a git repo.""" try: ref = open(os.path.join(repo_path, '.git', 'HEAD'), 'r').read().strip()[5:].split('/') branch = ref[-1] commit = open(os.path.join(repo_path, '.git', *ref), 'r').read().strip()[:7] return branch, commit except: return None
[ "def", "get_head", "(", "repo_path", ")", ":", "try", ":", "ref", "=", "open", "(", "os", ".", "path", ".", "join", "(", "repo_path", ",", "'.git'", ",", "'HEAD'", ")", ",", "'r'", ")", ".", "read", "(", ")", ".", "strip", "(", ")", "[", "5", ":", "]", ".", "split", "(", "'/'", ")", "branch", "=", "ref", "[", "-", "1", "]", "commit", "=", "open", "(", "os", ".", "path", ".", "join", "(", "repo_path", ",", "'.git'", ",", "*", "ref", ")", ",", "'r'", ")", ".", "read", "(", ")", ".", "strip", "(", ")", "[", ":", "7", "]", "return", "branch", ",", "commit", "except", ":", "return", "None" ]
[ "Get", "(", "branch", "commit", ")", "from", "HEAD", "of", "a", "git", "repo", "." ]
soimort/you-get
def get_head(repo_path): """Get (branch, commit) from HEAD of a git repo.""" try: ref = open(os.path.join(repo_path, '.git', 'HEAD'), 'r').read().strip()[5:].split('/') branch = ref[-1] commit = open(os.path.join(repo_path, '.git', *ref), 'r').read().strip()[:7] return branch, commit except: return None
Get (branch, commit) from HEAD of a git repo.
get_head
src/you_get/util/git.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/git.py#L7-L15
b746ac01c9f39de94cac2d56f665285b0523b974
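The reader above assumes .git/HEAD holds a symbolic ref of the form "ref: refs/heads/<branch>"; on a detached HEAD the sliced path does not exist and the bare except turns that into None. A sketch of the same information via the git CLI instead (assumes git is on PATH; this is an alternative, not how the record does it):

import subprocess

def get_head_via_git(repo_path):
    try:
        run = lambda *args: subprocess.check_output(
            ('git', '-C', repo_path) + args).decode().strip()
        # branch name (the literal 'HEAD' when detached), then short commit
        return run('rev-parse', '--abbrev-ref', 'HEAD'), run('rev-parse', '--short=7', 'HEAD')
    except Exception:
        return None  # mirrors the bare-except behaviour above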
def legitimize(text, os=detect_os()): """Converts a string to a valid filename. """ # POSIX systems text = text.translate({ 0: None, ord('/'): '-', ord('|'): '-', }) # FIXME: do some filesystem detection if os == 'windows' or os == 'cygwin' or os == 'wsl': # Windows (non-POSIX namespace) text = text.translate({ # Reserved in Windows VFAT and NTFS ord(':'): '-', ord('*'): '-', ord('?'): '-', ord('\\'): '-', ord('\"'): '\'', # Reserved in Windows VFAT ord('+'): '-', ord('<'): '-', ord('>'): '-', ord('['): '(', ord(']'): ')', ord('\t'): ' ', }) else: # *nix if os == 'mac': # Mac OS HFS+ text = text.translate({ ord(':'): '-', }) # Remove leading . if text.startswith("."): text = text[1:] text = text[:80] # Trim to 80 Unicode characters long return text
[ "def", "legitimize", "(", "text", ",", "os", "=", "detect_os", "(", ")", ")", ":", "# POSIX systems", "text", "=", "text", ".", "translate", "(", "{", "0", ":", "None", ",", "ord", "(", "'/'", ")", ":", "'-'", ",", "ord", "(", "'|'", ")", ":", "'-'", ",", "}", ")", "# FIXME: do some filesystem detection", "if", "os", "==", "'windows'", "or", "os", "==", "'cygwin'", "or", "os", "==", "'wsl'", ":", "# Windows (non-POSIX namespace)", "text", "=", "text", ".", "translate", "(", "{", "# Reserved in Windows VFAT and NTFS", "ord", "(", "':'", ")", ":", "'-'", ",", "ord", "(", "'*'", ")", ":", "'-'", ",", "ord", "(", "'?'", ")", ":", "'-'", ",", "ord", "(", "'\\\\'", ")", ":", "'-'", ",", "ord", "(", "'\\\"'", ")", ":", "'\\''", ",", "# Reserved in Windows VFAT", "ord", "(", "'+'", ")", ":", "'-'", ",", "ord", "(", "'<'", ")", ":", "'-'", ",", "ord", "(", "'>'", ")", ":", "'-'", ",", "ord", "(", "'['", ")", ":", "'('", ",", "ord", "(", "']'", ")", ":", "')'", ",", "ord", "(", "'\\t'", ")", ":", "' '", ",", "}", ")", "else", ":", "# *nix", "if", "os", "==", "'mac'", ":", "# Mac OS HFS+", "text", "=", "text", ".", "translate", "(", "{", "ord", "(", "':'", ")", ":", "'-'", ",", "}", ")", "# Remove leading .", "if", "text", ".", "startswith", "(", "\".\"", ")", ":", "text", "=", "text", "[", "1", ":", "]", "text", "=", "text", "[", ":", "80", "]", "# Trim to 82 Unicode characters long", "return", "text" ]
[ "Converts", "a", "string", "to", "a", "valid", "filename", "." ]
soimort/you-get
def legitimize(text, os=detect_os()): """Converts a string to a valid filename. """ # POSIX systems text = text.translate({ 0: None, ord('/'): '-', ord('|'): '-', }) # FIXME: do some filesystem detection if os == 'windows' or os == 'cygwin' or os == 'wsl': # Windows (non-POSIX namespace) text = text.translate({ # Reserved in Windows VFAT and NTFS ord(':'): '-', ord('*'): '-', ord('?'): '-', ord('\\'): '-', ord('\"'): '\'', # Reserved in Windows VFAT ord('+'): '-', ord('<'): '-', ord('>'): '-', ord('['): '(', ord(']'): ')', ord('\t'): ' ', }) else: # *nix if os == 'mac': # Mac OS HFS+ text = text.translate({ ord(':'): '-', }) # Remove leading . if text.startswith("."): text = text[1:] text = text[:80] # Trim to 80 Unicode characters long return text
Converts a string to a valid filename.
legitimize
src/you_get/util/fs.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/fs.py#L5-L47
b746ac01c9f39de94cac2d56f665285b0523b974
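The whole function is built on str.translate with a codepoint-to-replacement map; a quick demonstration of that mechanism on a made-up title:

win_reserved = {ord(c): '-' for c in '/|:*?\\'}  # subset of the table above
win_reserved.update({ord('"'): "'", ord('<'): '-', ord('>'): '-'})
title = 'a/b: "c"? <d>|e'
print(title.translate(win_reserved))  # a-b- 'c'- -d--e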
def get_terminal_size(): """Get (height, width) of the current terminal.""" try: import fcntl, termios, struct # fcntl module only available on Unix return struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234')) except: return (40, 80)
[ "def", "get_terminal_size", "(", ")", ":", "try", ":", "import", "fcntl", ",", "termios", ",", "struct", "# fcntl module only available on Unix", "return", "struct", ".", "unpack", "(", "'hh'", ",", "fcntl", ".", "ioctl", "(", "1", ",", "termios", ".", "TIOCGWINSZ", ",", "'1234'", ")", ")", "except", ":", "return", "(", "40", ",", "80", ")" ]
[ "Get", "(", "width", "height", ")", "of", "the", "current", "terminal", "." ]
soimort/you-get
def get_terminal_size(): """Get (height, width) of the current terminal.""" try: import fcntl, termios, struct # fcntl module only available on Unix return struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234')) except: return (40, 80)
Get (height, width) of the current terminal.
get_terminal_size
src/you_get/util/term.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/term.py#L3-L9
b746ac01c9f39de94cac2d56f665285b0523b974
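Since Python 3.3 the standard library has a portable equivalent; note that it reports (columns, lines), i.e. the transposed order of the tuple above:

import shutil

cols, rows = shutil.get_terminal_size(fallback=(80, 40))
print(rows, cols)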
def cbs_download(url, output_dir='.', merge=True, info_only=False, **kwargs): """Downloads CBS videos by URL. """ html = get_content(url) pid = match1(html, r'video\.settings\.pid\s*=\s*\'([^\']+)\'') title = match1(html, r'video\.settings\.title\s*=\s*\"([^\"]+)\"') theplatform_download_by_pid(pid, title, output_dir=output_dir, merge=merge, info_only=info_only)
[ "def", "cbs_download", "(", "url", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "html", "=", "get_content", "(", "url", ")", "pid", "=", "match1", "(", "html", ",", "r'video\\.settings\\.pid\\s*=\\s*\\'([^\\']+)\\''", ")", "title", "=", "match1", "(", "html", ",", "r'video\\.settings\\.title\\s*=\\s*\\\"([^\\\"]+)\\\"'", ")", "theplatform_download_by_pid", "(", "pid", ",", "title", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ")" ]
[ "Downloads", "CBS", "videos", "by", "URL", "." ]
soimort/you-get
def cbs_download(url, output_dir='.', merge=True, info_only=False, **kwargs): """Downloads CBS videos by URL. """ html = get_content(url) pid = match1(html, r'video\.settings\.pid\s*=\s*\'([^\']+)\'') title = match1(html, r'video\.settings\.title\s*=\s*\"([^\"]+)\"') theplatform_download_by_pid(pid, title, output_dir=output_dir, merge=merge, info_only=info_only)
Downloads CBS videos by URL.
cbs_download
src/you_get/extractors/cbs.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/cbs.py#L9-L17
b746ac01c9f39de94cac2d56f665285b0523b974
def download(self, **kwargs): """Override the original one Ugly ugly dirty hack""" if 'json_output' in kwargs and kwargs['json_output']: json_output.output(self) elif 'info_only' in kwargs and kwargs['info_only']: if 'stream_id' in kwargs and kwargs['stream_id']: # Display the stream stream_id = kwargs['stream_id'] if 'index' not in kwargs: self.p(stream_id) else: self.p_i(stream_id) else: # Display all available streams if 'index' not in kwargs: self.p([]) else: stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] self.p_i(stream_id) else: if 'stream_id' in kwargs and kwargs['stream_id']: # Download the stream stream_id = kwargs['stream_id'] else: # Download stream with the best quality stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] if 'index' not in kwargs: self.p(stream_id) else: self.p_i(stream_id) if stream_id in self.streams: urls = self.streams[stream_id]['src'] ext = self.streams[stream_id]['container'] total_size = self.streams[stream_id]['size'] else: urls = self.dash_streams[stream_id]['src'] ext = self.dash_streams[stream_id]['container'] total_size = self.dash_streams[stream_id]['size'] if not urls: log.wtf('[Failed] Cannot extract video source.') # For legacy main() #Here's the change!! download_url_ffmpeg(urls[0], self.title, 'mp4', output_dir=kwargs['output_dir'], merge=kwargs['merge'], stream=False) if not kwargs['caption']: print('Skipping captions.') return for lang in self.caption_tracks: filename = '%s.%s.srt' % (get_filename(self.title), lang) print('Saving %s ... ' % filename, end="", flush=True) srt = self.caption_tracks[lang] with open(os.path.join(kwargs['output_dir'], filename), 'w', encoding='utf-8') as x: x.write(srt) print('Done.')
[ "def", "download", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "'json_output'", "in", "kwargs", "and", "kwargs", "[", "'json_output'", "]", ":", "json_output", ".", "output", "(", "self", ")", "elif", "'info_only'", "in", "kwargs", "and", "kwargs", "[", "'info_only'", "]", ":", "if", "'stream_id'", "in", "kwargs", "and", "kwargs", "[", "'stream_id'", "]", ":", "# Display the stream", "stream_id", "=", "kwargs", "[", "'stream_id'", "]", "if", "'index'", "not", "in", "kwargs", ":", "self", ".", "p", "(", "stream_id", ")", "else", ":", "self", ".", "p_i", "(", "stream_id", ")", "else", ":", "# Display all available streams", "if", "'index'", "not", "in", "kwargs", ":", "self", ".", "p", "(", "[", "]", ")", "else", ":", "stream_id", "=", "self", ".", "streams_sorted", "[", "0", "]", "[", "'id'", "]", "if", "'id'", "in", "self", ".", "streams_sorted", "[", "0", "]", "else", "self", ".", "streams_sorted", "[", "0", "]", "[", "'itag'", "]", "self", ".", "p_i", "(", "stream_id", ")", "else", ":", "if", "'stream_id'", "in", "kwargs", "and", "kwargs", "[", "'stream_id'", "]", ":", "# Download the stream", "stream_id", "=", "kwargs", "[", "'stream_id'", "]", "else", ":", "# Download stream with the best quality", "stream_id", "=", "self", ".", "streams_sorted", "[", "0", "]", "[", "'id'", "]", "if", "'id'", "in", "self", ".", "streams_sorted", "[", "0", "]", "else", "self", ".", "streams_sorted", "[", "0", "]", "[", "'itag'", "]", "if", "'index'", "not", "in", "kwargs", ":", "self", ".", "p", "(", "stream_id", ")", "else", ":", "self", ".", "p_i", "(", "stream_id", ")", "if", "stream_id", "in", "self", ".", "streams", ":", "urls", "=", "self", ".", "streams", "[", "stream_id", "]", "[", "'src'", "]", "ext", "=", "self", ".", "streams", "[", "stream_id", "]", "[", "'container'", "]", "total_size", "=", "self", ".", "streams", "[", "stream_id", "]", "[", "'size'", "]", "else", ":", "urls", "=", "self", ".", "dash_streams", "[", "stream_id", "]", "[", "'src'", "]", "ext", "=", "self", ".", "dash_streams", "[", "stream_id", "]", "[", "'container'", "]", "total_size", "=", "self", ".", "dash_streams", "[", "stream_id", "]", "[", "'size'", "]", "if", "not", "urls", ":", "log", ".", "wtf", "(", "'[Failed] Cannot extract video source.'", ")", "# For legacy main()", "#Here's the change!!", "download_url_ffmpeg", "(", "urls", "[", "0", "]", ",", "self", ".", "title", ",", "'mp4'", ",", "output_dir", "=", "kwargs", "[", "'output_dir'", "]", ",", "merge", "=", "kwargs", "[", "'merge'", "]", ",", "stream", "=", "False", ")", "if", "not", "kwargs", "[", "'caption'", "]", ":", "print", "(", "'Skipping captions.'", ")", "return", "for", "lang", "in", "self", ".", "caption_tracks", ":", "filename", "=", "'%s.%s.srt'", "%", "(", "get_filename", "(", "self", ".", "title", ")", ",", "lang", ")", "print", "(", "'Saving %s ... '", "%", "filename", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "srt", "=", "self", ".", "caption_tracks", "[", "lang", "]", "with", "open", "(", "os", ".", "path", ".", "join", "(", "kwargs", "[", "'output_dir'", "]", ",", "filename", ")", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "x", ":", "x", ".", "write", "(", "srt", ")", "print", "(", "'Done.'", ")" ]
[ "Override", "the", "original", "one", "Ugly", "ugly", "dirty", "hack" ]
soimort/you-get
def download(self, **kwargs): """Override the original one Ugly ugly dirty hack""" if 'json_output' in kwargs and kwargs['json_output']: json_output.output(self) elif 'info_only' in kwargs and kwargs['info_only']: if 'stream_id' in kwargs and kwargs['stream_id']: # Display the stream stream_id = kwargs['stream_id'] if 'index' not in kwargs: self.p(stream_id) else: self.p_i(stream_id) else: # Display all available streams if 'index' not in kwargs: self.p([]) else: stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] self.p_i(stream_id) else: if 'stream_id' in kwargs and kwargs['stream_id']: # Download the stream stream_id = kwargs['stream_id'] else: # Download stream with the best quality stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] if 'index' not in kwargs: self.p(stream_id) else: self.p_i(stream_id) if stream_id in self.streams: urls = self.streams[stream_id]['src'] ext = self.streams[stream_id]['container'] total_size = self.streams[stream_id]['size'] else: urls = self.dash_streams[stream_id]['src'] ext = self.dash_streams[stream_id]['container'] total_size = self.dash_streams[stream_id]['size'] if not urls: log.wtf('[Failed] Cannot extract video source.') # For legacy main() #Here's the change!! download_url_ffmpeg(urls[0], self.title, 'mp4', output_dir=kwargs['output_dir'], merge=kwargs['merge'], stream=False) if not kwargs['caption']: print('Skipping captions.') return for lang in self.caption_tracks: filename = '%s.%s.srt' % (get_filename(self.title), lang) print('Saving %s ... ' % filename, end="", flush=True) srt = self.caption_tracks[lang] with open(os.path.join(kwargs['output_dir'], filename), 'w', encoding='utf-8') as x: x.write(srt) print('Done.')
Override the original one Ugly ugly dirty hack
Iqiyi.download
src/you_get/extractors/iqiyi.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/iqiyi.py#L158-L218
b746ac01c9f39de94cac2d56f665285b0523b974
def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs): """str, str, str, bool, bool -> None Download Acfun video by vid. Call Acfun API, decide which site to use, and pass the job to its extractor. """ #first call the main parsing API info = json.loads(get_content('http://www.acfun.cn/video/getVideo.aspx?id=' + vid)) sourceType = info['sourceType'] #decide sourceId to know which extractor to use if 'sourceId' in info: sourceId = info['sourceId'] # danmakuId = info['danmakuId'] #call extractor decided by sourceId if sourceType == 'sina': sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'youku': youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs) elif sourceType == 'tudou': tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'qq': qq_download_by_vid(sourceId, title, True, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'letv': letvcloud_download_by_vu(sourceId, '2d8c027396', title, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'zhuzhan': #As of Jul.28.2016, Acfun is using embsig to prevent hotlinking, so we need to pass this #In Mar. 2017 there is a dedicated ``acfun_proxy'' in youku cloud player #old code removed url = 'http://www.acfun.cn/v/ac' + vid yk_streams = youku_acfun_proxy(info['sourceId'], info['encode'], url) seq = ['mp4hd3', 'mp4hd2', 'mp4hd', 'flvhd'] for t in seq: if yk_streams.get(t): preferred = yk_streams[t] break #total_size in the json could be incorrect(F.I. 0) size = 0 for url in preferred[0]: _, _, seg_size = url_info(url) size += seg_size #fallback to flvhd is not quite possible if re.search(r'fid=[0-9A-Z\-]*.flv', preferred[0][0]): ext = 'flv' else: ext = 'mp4' print_info(site_info, title, ext, size) if not info_only: download_urls(preferred[0], title, ext, size, output_dir=output_dir, merge=merge) else: raise NotImplementedError(sourceType) if not info_only and not dry_run: if not kwargs['caption']: print('Skipping danmaku.') return try: title = get_filename(title) print('Downloading %s ...\n' % (title + '.cmt.json')) cmt = get_srt_json(vid) with open(os.path.join(output_dir, title + '.cmt.json'), 'w', encoding='utf-8') as x: x.write(cmt) except: pass
[ "def", "acfun_download_by_vid", "(", "vid", ",", "title", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "#first call the main parasing API", "info", "=", "json", ".", "loads", "(", "get_content", "(", "'http://www.acfun.cn/video/getVideo.aspx?id='", "+", "vid", ")", ")", "sourceType", "=", "info", "[", "'sourceType'", "]", "#decide sourceId to know which extractor to use", "if", "'sourceId'", "in", "info", ":", "sourceId", "=", "info", "[", "'sourceId'", "]", "# danmakuId = info['danmakuId']", "#call extractor decided by sourceId", "if", "sourceType", "==", "'sina'", ":", "sina_download_by_vid", "(", "sourceId", ",", "title", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ")", "elif", "sourceType", "==", "'youku'", ":", "youku_download_by_vid", "(", "sourceId", ",", "title", "=", "title", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ",", "*", "*", "kwargs", ")", "elif", "sourceType", "==", "'tudou'", ":", "tudou_download_by_iid", "(", "sourceId", ",", "title", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ")", "elif", "sourceType", "==", "'qq'", ":", "qq_download_by_vid", "(", "sourceId", ",", "title", ",", "True", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ")", "elif", "sourceType", "==", "'letv'", ":", "letvcloud_download_by_vu", "(", "sourceId", ",", "'2d8c027396'", ",", "title", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ")", "elif", "sourceType", "==", "'zhuzhan'", ":", "#As in Jul.28.2016, Acfun is using embsig to anti hotlink so we need to pass this", "#In Mar. 2017 there is a dedicated ``acfun_proxy'' in youku cloud player", "#old code removed", "url", "=", "'http://www.acfun.cn/v/ac'", "+", "vid", "yk_streams", "=", "youku_acfun_proxy", "(", "info", "[", "'sourceId'", "]", ",", "info", "[", "'encode'", "]", ",", "url", ")", "seq", "=", "[", "'mp4hd3'", ",", "'mp4hd2'", ",", "'mp4hd'", ",", "'flvhd'", "]", "for", "t", "in", "seq", ":", "if", "yk_streams", ".", "get", "(", "t", ")", ":", "preferred", "=", "yk_streams", "[", "t", "]", "break", "#total_size in the json could be incorrect(F.I. 
0)", "size", "=", "0", "for", "url", "in", "preferred", "[", "0", "]", ":", "_", ",", "_", ",", "seg_size", "=", "url_info", "(", "url", ")", "size", "+=", "seg_size", "#fallback to flvhd is not quite possible", "if", "re", ".", "search", "(", "r'fid=[0-9A-Z\\-]*.flv'", ",", "preferred", "[", "0", "]", "[", "0", "]", ")", ":", "ext", "=", "'flv'", "else", ":", "ext", "=", "'mp4'", "print_info", "(", "site_info", ",", "title", ",", "ext", ",", "size", ")", "if", "not", "info_only", ":", "download_urls", "(", "preferred", "[", "0", "]", ",", "title", ",", "ext", ",", "size", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ")", "else", ":", "raise", "NotImplementedError", "(", "sourceType", ")", "if", "not", "info_only", "and", "not", "dry_run", ":", "if", "not", "kwargs", "[", "'caption'", "]", ":", "print", "(", "'Skipping danmaku.'", ")", "return", "try", ":", "title", "=", "get_filename", "(", "title", ")", "print", "(", "'Downloading %s ...\\n'", "%", "(", "title", "+", "'.cmt.json'", ")", ")", "cmt", "=", "get_srt_json", "(", "vid", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "output_dir", ",", "title", "+", "'.cmt.json'", ")", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "x", ":", "x", ".", "write", "(", "cmt", ")", "except", ":", "pass" ]
[ "str", "str", "str", "bool", "bool", "-", ">", "None" ]
soimort/you-get
def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs): """str, str, str, bool, bool -> None Download Acfun video by vid. Call Acfun API, decide which site to use, and pass the job to its extractor. """ #first call the main parsing API info = json.loads(get_content('http://www.acfun.cn/video/getVideo.aspx?id=' + vid)) sourceType = info['sourceType'] #decide sourceId to know which extractor to use if 'sourceId' in info: sourceId = info['sourceId'] # danmakuId = info['danmakuId'] #call extractor decided by sourceId if sourceType == 'sina': sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'youku': youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs) elif sourceType == 'tudou': tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'qq': qq_download_by_vid(sourceId, title, True, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'letv': letvcloud_download_by_vu(sourceId, '2d8c027396', title, output_dir=output_dir, merge=merge, info_only=info_only) elif sourceType == 'zhuzhan': #As of Jul.28.2016, Acfun is using embsig to prevent hotlinking, so we need to pass this #In Mar. 2017 there is a dedicated ``acfun_proxy'' in youku cloud player #old code removed url = 'http://www.acfun.cn/v/ac' + vid yk_streams = youku_acfun_proxy(info['sourceId'], info['encode'], url) seq = ['mp4hd3', 'mp4hd2', 'mp4hd', 'flvhd'] for t in seq: if yk_streams.get(t): preferred = yk_streams[t] break #total_size in the json could be incorrect(F.I. 0) size = 0 for url in preferred[0]: _, _, seg_size = url_info(url) size += seg_size #fallback to flvhd is not quite possible if re.search(r'fid=[0-9A-Z\-]*.flv', preferred[0][0]): ext = 'flv' else: ext = 'mp4' print_info(site_info, title, ext, size) if not info_only: download_urls(preferred[0], title, ext, size, output_dir=output_dir, merge=merge) else: raise NotImplementedError(sourceType) if not info_only and not dry_run: if not kwargs['caption']: print('Skipping danmaku.') return try: title = get_filename(title) print('Downloading %s ...\n' % (title + '.cmt.json')) cmt = get_srt_json(vid) with open(os.path.join(output_dir, title + '.cmt.json'), 'w', encoding='utf-8') as x: x.write(cmt) except: pass
str, str, str, bool, bool -> None Download Acfun video by vid. Call Acfun API, decide which site to use, and pass the job to its extractor.
acfun_download_by_vid
src/you_get/extractors/acfun.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/acfun.py#L42-L109
b746ac01c9f39de94cac2d56f665285b0523b974
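The if/elif chain above is a dispatch on sourceType. The same shape can be written as a lookup table, which keeps the NotImplementedError fallthrough explicit; the handlers below are stand-ins for the real extractor functions, so this is only a structural sketch:

def dispatch(source_type, source_id, title):
    handlers = {
        'sina':  lambda: print('sina_download_by_vid', source_id, title),
        'youku': lambda: print('youku_download_by_vid', source_id, title),
        'tudou': lambda: print('tudou_download_by_iid', source_id, title),
        'qq':    lambda: print('qq_download_by_vid', source_id, title),
    }
    if source_type not in handlers:
        raise NotImplementedError(source_type)
    handlers[source_type]()

dispatch('qq', 'x0123abc', 'demo')  # hypothetical ids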
def main_dev(**kwargs): """Main entry point. you-get-dev """ # Get (branch, commit) if running from a git repo. head = git.get_head(kwargs['repo_path']) # Get options and arguments. try: opts, args = getopt.getopt(sys.argv[1:], _short_options, _options) except getopt.GetoptError as e: log.wtf(""" [Fatal] {}. Try '{} --help' for more options.""".format(e, script_name)) if not opts and not args: # Display help. print(_help) # Enter GUI mode. #from .gui import gui_main #gui_main() else: conf = {} for opt, arg in opts: if opt in ('-h', '--help'): # Display help. print(_help) elif opt in ('-V', '--version'): # Display version. log.println("you-get:", log.BOLD) log.println(" version: {}".format(__version__)) if head is not None: log.println(" branch: {}\n commit: {}".format(*head)) else: log.println(" branch: {}\n commit: {}".format("(stable)", "(tag v{})".format(__version__))) log.println(" platform: {}".format(platform.platform())) log.println(" python: {}".format(sys.version.split('\n')[0])) elif opt in ('-g', '--gui'): # Run using GUI. conf['gui'] = True elif opt in ('-f', '--force'): # Force download. conf['force'] = True elif opt in ('-l', '--playlist', '--playlists'): # Download playlist whenever possible. conf['playlist'] = True if args: if 'gui' in conf and conf['gui']: # Enter GUI mode. from .gui import gui_main gui_main(*args, **conf) else: # Enter console mode. from .console import console_main console_main(*args, **conf)
[ "def", "main_dev", "(", "*", "*", "kwargs", ")", ":", "# Get (branch, commit) if running from a git repo.", "head", "=", "git", ".", "get_head", "(", "kwargs", "[", "'repo_path'", "]", ")", "# Get options and arguments.", "try", ":", "opts", ",", "args", "=", "getopt", ".", "getopt", "(", "sys", ".", "argv", "[", "1", ":", "]", ",", "_short_options", ",", "_options", ")", "except", "getopt", ".", "GetoptError", "as", "e", ":", "log", ".", "wtf", "(", "\"\"\"\n [Fatal] {}.\n Try '{} --help' for more options.\"\"\"", ".", "format", "(", "e", ",", "script_name", ")", ")", "if", "not", "opts", "and", "not", "args", ":", "# Display help.", "print", "(", "_help", ")", "# Enter GUI mode.", "#from .gui import gui_main", "#gui_main()", "else", ":", "conf", "=", "{", "}", "for", "opt", ",", "arg", "in", "opts", ":", "if", "opt", "in", "(", "'-h'", ",", "'--help'", ")", ":", "# Display help.", "print", "(", "_help", ")", "elif", "opt", "in", "(", "'-V'", ",", "'--version'", ")", ":", "# Display version.", "log", ".", "println", "(", "\"you-get:\"", ",", "log", ".", "BOLD", ")", "log", ".", "println", "(", "\" version: {}\"", ".", "format", "(", "__version__", ")", ")", "if", "head", "is", "not", "None", ":", "log", ".", "println", "(", "\" branch: {}\\n commit: {}\"", ".", "format", "(", "*", "head", ")", ")", "else", ":", "log", ".", "println", "(", "\" branch: {}\\n commit: {}\"", ".", "format", "(", "\"(stable)\"", ",", "\"(tag v{})\"", ".", "format", "(", "__version__", ")", ")", ")", "log", ".", "println", "(", "\" platform: {}\"", ".", "format", "(", "platform", ".", "platform", "(", ")", ")", ")", "log", ".", "println", "(", "\" python: {}\"", ".", "format", "(", "sys", ".", "version", ".", "split", "(", "'\\n'", ")", "[", "0", "]", ")", ")", "elif", "opt", "in", "(", "'-g'", ",", "'--gui'", ")", ":", "# Run using GUI.", "conf", "[", "'gui'", "]", "=", "True", "elif", "opt", "in", "(", "'-f'", ",", "'--force'", ")", ":", "# Force download.", "conf", "[", "'force'", "]", "=", "True", "elif", "opt", "in", "(", "'-l'", ",", "'--playlist'", ",", "'--playlists'", ")", ":", "# Download playlist whenever possible.", "conf", "[", "'playlist'", "]", "=", "True", "if", "args", ":", "if", "'gui'", "in", "conf", "and", "conf", "[", "'gui'", "]", ":", "# Enter GUI mode.", "from", ".", "gui", "import", "gui_main", "gui_main", "(", "*", "args", ",", "*", "*", "conf", ")", "else", ":", "# Enter console mode.", "from", ".", "console", "import", "console_main", "console_main", "(", "*", "args", ",", "*", "*", "conf", ")" ]
[ "Main", "entry", "point", ".", "you", "-", "get", "-", "dev" ]
soimort/you-get
def main_dev(**kwargs): """Main entry point. you-get-dev """ # Get (branch, commit) if running from a git repo. head = git.get_head(kwargs['repo_path']) # Get options and arguments. try: opts, args = getopt.getopt(sys.argv[1:], _short_options, _options) except getopt.GetoptError as e: log.wtf(""" [Fatal] {}. Try '{} --help' for more options.""".format(e, script_name)) if not opts and not args: # Display help. print(_help) # Enter GUI mode. #from .gui import gui_main #gui_main() else: conf = {} for opt, arg in opts: if opt in ('-h', '--help'): # Display help. print(_help) elif opt in ('-V', '--version'): # Display version. log.println("you-get:", log.BOLD) log.println(" version: {}".format(__version__)) if head is not None: log.println(" branch: {}\n commit: {}".format(*head)) else: log.println(" branch: {}\n commit: {}".format("(stable)", "(tag v{})".format(__version__))) log.println(" platform: {}".format(platform.platform())) log.println(" python: {}".format(sys.version.split('\n')[0])) elif opt in ('-g', '--gui'): # Run using GUI. conf['gui'] = True elif opt in ('-f', '--force'): # Force download. conf['force'] = True elif opt in ('-l', '--playlist', '--playlists'): # Download playlist whenever possible. conf['playlist'] = True if args: if 'gui' in conf and conf['gui']: # Enter GUI mode. from .gui import gui_main gui_main(*args, **conf) else: # Enter console mode. from .console import console_main console_main(*args, **conf)
Main entry point. you-get-dev
main_dev
src/you_get/__main__.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/__main__.py#L24-L85
b746ac01c9f39de94cac2d56f665285b0523b974
def ffmpeg_download_stream(files, title, ext, params={}, output_dir='.', stream=True): """str, str->True WARNING: NOT THE SAME PARAMS AS OTHER FUNCTIONS!!!!!! You can basically download anything with this function, but it is better to leave it alone. """ output = title + '.' + ext if not (output_dir == '.'): output = output_dir + '/' + output print('Downloading streaming content with FFmpeg, press q to stop recording...') if stream: ffmpeg_params = [FFMPEG] + ['-y', '-re', '-i'] else: ffmpeg_params = [FFMPEG] + ['-y', '-i'] ffmpeg_params.append(files) #not the same here!!!! if FFMPEG == 'avconv': #who cares? ffmpeg_params += ['-c', 'copy'] else: ffmpeg_params += ['-c', 'copy', '-bsf:a', 'aac_adtstoasc'] if params is not None: if len(params) > 0: for k, v in params.items(): ffmpeg_params.append(k) ffmpeg_params.append(v) ffmpeg_params.append(output) print(' '.join(ffmpeg_params)) try: a = subprocess.Popen(ffmpeg_params, stdin= subprocess.PIPE) a.communicate() except KeyboardInterrupt: try: a.stdin.write('q'.encode('utf-8')) except: pass return True
[ "def", "ffmpeg_download_stream", "(", "files", ",", "title", ",", "ext", ",", "params", "=", "{", "}", ",", "output_dir", "=", "'.'", ",", "stream", "=", "True", ")", ":", "output", "=", "title", "+", "'.'", "+", "ext", "if", "not", "(", "output_dir", "==", "'.'", ")", ":", "output", "=", "output_dir", "+", "'/'", "+", "output", "print", "(", "'Downloading streaming content with FFmpeg, press q to stop recording...'", ")", "if", "stream", ":", "ffmpeg_params", "=", "[", "FFMPEG", "]", "+", "[", "'-y'", ",", "'-re'", ",", "'-i'", "]", "else", ":", "ffmpeg_params", "=", "[", "FFMPEG", "]", "+", "[", "'-y'", ",", "'-i'", "]", "ffmpeg_params", ".", "append", "(", "files", ")", "#not the same here!!!!", "if", "FFMPEG", "==", "'avconv'", ":", "#who cares?", "ffmpeg_params", "+=", "[", "'-c'", ",", "'copy'", ",", "output", "]", "else", ":", "ffmpeg_params", "+=", "[", "'-c'", ",", "'copy'", ",", "'-bsf:a'", ",", "'aac_adtstoasc'", "]", "if", "params", "is", "not", "None", ":", "if", "len", "(", "params", ")", ">", "0", ":", "for", "k", ",", "v", "in", "params", ":", "ffmpeg_params", ".", "append", "(", "k", ")", "ffmpeg_params", ".", "append", "(", "v", ")", "ffmpeg_params", ".", "append", "(", "output", ")", "print", "(", "' '", ".", "join", "(", "ffmpeg_params", ")", ")", "try", ":", "a", "=", "subprocess", ".", "Popen", "(", "ffmpeg_params", ",", "stdin", "=", "subprocess", ".", "PIPE", ")", "a", ".", "communicate", "(", ")", "except", "KeyboardInterrupt", ":", "try", ":", "a", ".", "stdin", ".", "write", "(", "'q'", ".", "encode", "(", "'utf-8'", ")", ")", "except", ":", "pass", "return", "True" ]
[ "str", "str", "-", ">", "True", "WARNING", ":", "NOT", "THE", "SAME", "PARMS", "AS", "OTHER", "FUNCTIONS!!!!!!", "You", "can", "basicly", "download", "anything", "with", "this", "function", "but", "better", "leave", "it", "alone", "with" ]
soimort/you-get
def ffmpeg_download_stream(files, title, ext, params={}, output_dir='.', stream=True): """str, str->True WARNING: NOT THE SAME PARAMS AS OTHER FUNCTIONS!!!!!! You can basically download anything with this function, but it is better to leave it alone. """ output = title + '.' + ext if not (output_dir == '.'): output = output_dir + '/' + output print('Downloading streaming content with FFmpeg, press q to stop recording...') if stream: ffmpeg_params = [FFMPEG] + ['-y', '-re', '-i'] else: ffmpeg_params = [FFMPEG] + ['-y', '-i'] ffmpeg_params.append(files) #not the same here!!!! if FFMPEG == 'avconv': #who cares? ffmpeg_params += ['-c', 'copy'] else: ffmpeg_params += ['-c', 'copy', '-bsf:a', 'aac_adtstoasc'] if params is not None: if len(params) > 0: for k, v in params.items(): ffmpeg_params.append(k) ffmpeg_params.append(v) ffmpeg_params.append(output) print(' '.join(ffmpeg_params)) try: a = subprocess.Popen(ffmpeg_params, stdin= subprocess.PIPE) a.communicate() except KeyboardInterrupt: try: a.stdin.write('q'.encode('utf-8')) except: pass return True
str, str->True WARNING: NOT THE SAME PARMS AS OTHER FUNCTIONS!!!!!! You can basicly download anything with this function but better leave it alone with
ffmpeg_download_stream
src/you_get/processor/ffmpeg.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/processor/ffmpeg.py#L220-L262
b746ac01c9f39de94cac2d56f665285b0523b974
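Because params arrives as a dict, flattening it into the argv tail needs .items(); a quick check with made-up options (dict order is preserved on Python 3.7+):

params = {'-ss': '10', '-t': '30'}  # made-up options
argv = ['ffmpeg', '-y', '-i', 'in.m3u8', '-c', 'copy']
for k, v in params.items():
    argv += [k, v]
argv.append('out.mp4')
print(' '.join(argv))  # ffmpeg -y -i in.m3u8 -c copy -ss 10 -t 30 out.mp4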
def match1(text, *patterns): """Scans through a string for substrings matched by some patterns (first-subgroups only). Args: text: A string to be scanned. patterns: Arbitrary number of regex patterns. Returns: When only one pattern is given, returns a string (None if no match found). When more than one pattern is given, returns a list of strings ([] if no match found). """ if len(patterns) == 1: pattern = patterns[0] match = re.search(pattern, text) if match: return match.group(1) else: return None else: ret = [] for pattern in patterns: match = re.search(pattern, text) if match: ret.append(match.group(1)) return ret
[ "def", "match1", "(", "text", ",", "*", "patterns", ")", ":", "if", "len", "(", "patterns", ")", "==", "1", ":", "pattern", "=", "patterns", "[", "0", "]", "match", "=", "re", ".", "search", "(", "pattern", ",", "text", ")", "if", "match", ":", "return", "match", ".", "group", "(", "1", ")", "else", ":", "return", "None", "else", ":", "ret", "=", "[", "]", "for", "pattern", "in", "patterns", ":", "match", "=", "re", ".", "search", "(", "pattern", ",", "text", ")", "if", "match", ":", "ret", ".", "append", "(", "match", ".", "group", "(", "1", ")", ")", "return", "ret" ]
[ "Scans", "through", "a", "string", "for", "substrings", "matched", "some", "patterns", "(", "first", "-", "subgroups", "only", ")", "." ]
soimort/you-get
def match1(text, *patterns): """Scans through a string for substrings matched by some patterns (first-subgroups only). Args: text: A string to be scanned. patterns: Arbitrary number of regex patterns. Returns: When only one pattern is given, returns a string (None if no match found). When more than one pattern is given, returns a list of strings ([] if no match found). """ if len(patterns) == 1: pattern = patterns[0] match = re.search(pattern, text) if match: return match.group(1) else: return None else: ret = [] for pattern in patterns: match = re.search(pattern, text) if match: ret.append(match.group(1)) return ret
Scans through a string for substrings matched by some patterns (first-subgroups only). Args: text: A string to be scanned. patterns: Arbitrary number of regex patterns. Returns: When only one pattern is given, returns a string (None if no match found). When more than one pattern is given, returns a list of strings ([] if no match found).
match1
src/you_get/common.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L224-L249
b746ac01c9f39de94cac2d56f665285b0523b974
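The two return shapes documented above (a string or None for one pattern, a list for several) in action on a made-up input:

import re

text = 'id=42; name=alice'
m = re.search(r'id=(\d+)', text)  # single pattern: first group or None
print(m.group(1) if m else None)  # 42
ret = []                          # several patterns: collect first groups
for pat in (r'id=(\d+)', r'name=(\w+)', r'missing=(\w+)'):
    m = re.search(pat, text)
    if m:
        ret.append(m.group(1))
print(ret)                        # ['42', 'alice']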
def matchall(text, patterns): """Scans through a string for substrings matched by some patterns. Args: text: A string to be scanned. patterns: a list of regex patterns. Returns: a list if matched; empty if not. """ ret = [] for pattern in patterns: match = re.findall(pattern, text) ret += match return ret
[ "def", "matchall", "(", "text", ",", "patterns", ")", ":", "ret", "=", "[", "]", "for", "pattern", "in", "patterns", ":", "match", "=", "re", ".", "findall", "(", "pattern", ",", "text", ")", "ret", "+=", "match", "return", "ret" ]
[ "Scans", "through", "a", "string", "for", "substrings", "matched", "some", "patterns", "." ]
soimort/you-get
def matchall(text, patterns): """Scans through a string for substrings matched by some patterns. Args: text: A string to be scanned. patterns: a list of regex patterns. Returns: a list if matched; empty if not. """ ret = [] for pattern in patterns: match = re.findall(pattern, text) ret += match return ret
Scans through a string for substrings matched by some patterns. Args: text: A string to be scanned. patterns: a list of regex patterns. Returns: a list if matched; empty if not.
matchall
src/you_get/common.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L252-L268
b746ac01c9f39de94cac2d56f665285b0523b974
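Since the helper concatenates re.findall results pattern by pattern, the output is grouped by pattern rather than by position in the text; note also that findall yields tuples instead of strings when a pattern has several groups. On a made-up input:

import re

text = 'a=1 b=2 a=3'
ret = []
for pat in (r'a=(\d)', r'b=(\d)'):
    ret += re.findall(pat, text)
print(ret)  # ['1', '3', '2'] -- all a-matches first, then the b-match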
def parse_query_param(url, param): """Parses the query string of a URL and returns the value of a parameter. Args: url: A URL. param: A string representing the name of the parameter. Returns: The value of the parameter. """ try: return parse.parse_qs(parse.urlparse(url).query)[param][0] except: return None
[ "def", "parse_query_param", "(", "url", ",", "param", ")", ":", "try", ":", "return", "parse", ".", "parse_qs", "(", "parse", ".", "urlparse", "(", "url", ")", ".", "query", ")", "[", "param", "]", "[", "0", "]", "except", ":", "return", "None" ]
[ "Parses", "the", "query", "string", "of", "a", "URL", "and", "returns", "the", "value", "of", "a", "parameter", "." ]
soimort/you-get
def parse_query_param(url, param): """Parses the query string of a URL and returns the value of a parameter. Args: url: A URL. param: A string representing the name of the parameter. Returns: The value of the parameter. """ try: return parse.parse_qs(parse.urlparse(url).query)[param][0] except: return None
Parses the query string of a URL and returns the value of a parameter. Args: url: A URL. param: A string representing the name of the parameter. Returns: The value of the parameter.
parse_query_param
src/you_get/common.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L285-L299
b746ac01c9f39de94cac2d56f665285b0523b974
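The same lookup spelled out; the try/except above covers both a missing parameter (a KeyError) and unparsable input:

from urllib import parse

url = 'https://www.youtube.com/watch?v=abc123&t=42'  # made-up URL
qs = parse.parse_qs(parse.urlparse(url).query)
print(qs['v'][0])   # abc123
print(qs.get('x'))  # None -- absent keys are why the except is there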
def ungzip(data): """Decompresses data for Content-Encoding: gzip. """ from io import BytesIO import gzip buffer = BytesIO(data) f = gzip.GzipFile(fileobj=buffer) return f.read()
[ "def", "ungzip", "(", "data", ")", ":", "from", "io", "import", "BytesIO", "import", "gzip", "buffer", "=", "BytesIO", "(", "data", ")", "f", "=", "gzip", ".", "GzipFile", "(", "fileobj", "=", "buffer", ")", "return", "f", ".", "read", "(", ")" ]
[ "Decompresses", "data", "for", "Content", "-", "Encoding", ":", "gzip", "." ]
soimort/you-get
def ungzip(data): """Decompresses data for Content-Encoding: gzip. """ from io import BytesIO import gzip buffer = BytesIO(data) f = gzip.GzipFile(fileobj=buffer) return f.read()
Decompresses data for Content-Encoding: gzip.
ungzip
src/you_get/common.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L319-L326
b746ac01c9f39de94cac2d56f665285b0523b974
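A round-trip check of the BytesIO-plus-GzipFile route (gzip.decompress, available since Python 3.2, would do the same in one call):

import gzip
from io import BytesIO

payload = 'hello'.encode('utf-8')
blob = gzip.compress(payload)  # what a gzip-encoded response body looks like
print(gzip.GzipFile(fileobj=BytesIO(blob)).read() == payload)  # True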
def undeflate(data): """Decompresses data for Content-Encoding: deflate. (the zlib compression is used.) """ import zlib decompressobj = zlib.decompressobj(-zlib.MAX_WBITS) return decompressobj.decompress(data)+decompressobj.flush()
[ "def", "undeflate", "(", "data", ")", ":", "import", "zlib", "decompressobj", "=", "zlib", ".", "decompressobj", "(", "-", "zlib", ".", "MAX_WBITS", ")", "return", "decompressobj", ".", "decompress", "(", "data", ")", "+", "decompressobj", ".", "flush", "(", ")" ]
[ "Decompresses", "data", "for", "Content", "-", "Encoding", ":", "deflate", ".", "(", "the", "zlib", "compression", "is", "used", ".", ")" ]
soimort/you-get
def undeflate(data): """Decompresses data for Content-Encoding: deflate. (the zlib compression is used.) """ import zlib decompressobj = zlib.decompressobj(-zlib.MAX_WBITS) return decompressobj.decompress(data)+decompressobj.flush()
Decompresses data for Content-Encoding: deflate. (the zlib compression is used.)
undeflate
src/you_get/common.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L329-L335
b746ac01c9f39de94cac2d56f665285b0523b974
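The negative wbits means raw deflate with no zlib header, which is what many servers actually send for Content-Encoding: deflate; a round-trip check:

import zlib

payload = b'hello'
co = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)  # raw deflate stream
raw = co.compress(payload) + co.flush()
do = zlib.decompressobj(-zlib.MAX_WBITS)
print(do.decompress(raw) + do.flush() == payload)  # True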
def get_content(url, headers={}, decoded=True): """Gets the content of a URL by sending an HTTP GET request. Args: url: A URL. headers: Request headers used by the client. decoded: Whether to decode the response body using UTF-8 or the charset specified in Content-Type. Returns: The content as a string. """ logging.debug('get_content: %s' % url) req = request.Request(url, headers=headers) if cookies: cookies.add_cookie_header(req) req.headers.update(req.unredirected_hdrs) response = urlopen_with_retry(req) data = response.read() # Handle HTTP compression for gzip and deflate (zlib) content_encoding = response.getheader('Content-Encoding') if content_encoding == 'gzip': data = ungzip(data) elif content_encoding == 'deflate': data = undeflate(data) # Decode the response body if decoded: charset = match1( response.getheader('Content-Type', ''), r'charset=([\w-]+)' ) if charset is not None: data = data.decode(charset, 'ignore') else: data = data.decode('utf-8', 'ignore') return data
[ "def", "get_content", "(", "url", ",", "headers", "=", "{", "}", ",", "decoded", "=", "True", ")", ":", "logging", ".", "debug", "(", "'get_content: %s'", "%", "url", ")", "req", "=", "request", ".", "Request", "(", "url", ",", "headers", "=", "headers", ")", "if", "cookies", ":", "cookies", ".", "add_cookie_header", "(", "req", ")", "req", ".", "headers", ".", "update", "(", "req", ".", "unredirected_hdrs", ")", "response", "=", "urlopen_with_retry", "(", "req", ")", "data", "=", "response", ".", "read", "(", ")", "# Handle HTTP compression for gzip and deflate (zlib)", "content_encoding", "=", "response", ".", "getheader", "(", "'Content-Encoding'", ")", "if", "content_encoding", "==", "'gzip'", ":", "data", "=", "ungzip", "(", "data", ")", "elif", "content_encoding", "==", "'deflate'", ":", "data", "=", "undeflate", "(", "data", ")", "# Decode the response body", "if", "decoded", ":", "charset", "=", "match1", "(", "response", ".", "getheader", "(", "'Content-Type'", ",", "''", ")", ",", "r'charset=([\\w-]+)'", ")", "if", "charset", "is", "not", "None", ":", "data", "=", "data", ".", "decode", "(", "charset", ",", "'ignore'", ")", "else", ":", "data", "=", "data", ".", "decode", "(", "'utf-8'", ",", "'ignore'", ")", "return", "data" ]
[ "Gets", "the", "content", "of", "a", "URL", "via", "sending", "a", "HTTP", "GET", "request", "." ]
soimort/you-get
def get_content(url, headers={}, decoded=True): """Gets the content of a URL by sending an HTTP GET request. Args: url: A URL. headers: Request headers used by the client. decoded: Whether to decode the response body using UTF-8 or the charset specified in Content-Type. Returns: The content as a string. """ logging.debug('get_content: %s' % url) req = request.Request(url, headers=headers) if cookies: cookies.add_cookie_header(req) req.headers.update(req.unredirected_hdrs) response = urlopen_with_retry(req) data = response.read() # Handle HTTP compression for gzip and deflate (zlib) content_encoding = response.getheader('Content-Encoding') if content_encoding == 'gzip': data = ungzip(data) elif content_encoding == 'deflate': data = undeflate(data) # Decode the response body if decoded: charset = match1( response.getheader('Content-Type', ''), r'charset=([\w-]+)' ) if charset is not None: data = data.decode(charset, 'ignore') else: data = data.decode('utf-8', 'ignore') return data
Gets the content of a URL by sending an HTTP GET request. Args: url: A URL. headers: Request headers used by the client. decoded: Whether to decode the response body using UTF-8 or the charset specified in Content-Type. Returns: The content as a string.
get_content
src/you_get/common.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L415-L454
b746ac01c9f39de94cac2d56f665285b0523b974
def post_content(url, headers={}, post_data={}, decoded=True, **kwargs): """Posts the content of a URL by sending an HTTP POST request. Args: url: A URL. headers: Request headers used by the client. decoded: Whether to decode the response body using UTF-8 or the charset specified in Content-Type. Returns: The content as a string. """ if kwargs.get('post_data_raw'): logging.debug('post_content: %s\npost_data_raw: %s' % (url, kwargs['post_data_raw'])) else: logging.debug('post_content: %s\npost_data: %s' % (url, post_data)) req = request.Request(url, headers=headers) if cookies: cookies.add_cookie_header(req) req.headers.update(req.unredirected_hdrs) if kwargs.get('post_data_raw'): post_data_enc = bytes(kwargs['post_data_raw'], 'utf-8') else: post_data_enc = bytes(parse.urlencode(post_data), 'utf-8') response = urlopen_with_retry(req, data=post_data_enc) data = response.read() # Handle HTTP compression for gzip and deflate (zlib) content_encoding = response.getheader('Content-Encoding') if content_encoding == 'gzip': data = ungzip(data) elif content_encoding == 'deflate': data = undeflate(data) # Decode the response body if decoded: charset = match1( response.getheader('Content-Type'), r'charset=([\w-]+)' ) if charset is not None: data = data.decode(charset) else: data = data.decode('utf-8') return data
[ "def", "post_content", "(", "url", ",", "headers", "=", "{", "}", ",", "post_data", "=", "{", "}", ",", "decoded", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'post_data_raw'", ")", ":", "logging", ".", "debug", "(", "'post_content: %s\\npost_data_raw: %s'", "%", "(", "url", ",", "kwargs", "[", "'post_data_raw'", "]", ")", ")", "else", ":", "logging", ".", "debug", "(", "'post_content: %s\\npost_data: %s'", "%", "(", "url", ",", "post_data", ")", ")", "req", "=", "request", ".", "Request", "(", "url", ",", "headers", "=", "headers", ")", "if", "cookies", ":", "cookies", ".", "add_cookie_header", "(", "req", ")", "req", ".", "headers", ".", "update", "(", "req", ".", "unredirected_hdrs", ")", "if", "kwargs", ".", "get", "(", "'post_data_raw'", ")", ":", "post_data_enc", "=", "bytes", "(", "kwargs", "[", "'post_data_raw'", "]", ",", "'utf-8'", ")", "else", ":", "post_data_enc", "=", "bytes", "(", "parse", ".", "urlencode", "(", "post_data", ")", ",", "'utf-8'", ")", "response", "=", "urlopen_with_retry", "(", "req", ",", "data", "=", "post_data_enc", ")", "data", "=", "response", ".", "read", "(", ")", "# Handle HTTP compression for gzip and deflate (zlib)", "content_encoding", "=", "response", ".", "getheader", "(", "'Content-Encoding'", ")", "if", "content_encoding", "==", "'gzip'", ":", "data", "=", "ungzip", "(", "data", ")", "elif", "content_encoding", "==", "'deflate'", ":", "data", "=", "undeflate", "(", "data", ")", "# Decode the response body", "if", "decoded", ":", "charset", "=", "match1", "(", "response", ".", "getheader", "(", "'Content-Type'", ")", ",", "r'charset=([\\w-]+)'", ")", "if", "charset", "is", "not", "None", ":", "data", "=", "data", ".", "decode", "(", "charset", ")", "else", ":", "data", "=", "data", ".", "decode", "(", "'utf-8'", ")", "return", "data" ]
[ "Post", "the", "content", "of", "a", "URL", "via", "sending", "a", "HTTP", "POST", "request", "." ]
soimort/you-get
def post_content(url, headers={}, post_data={}, decoded=True, **kwargs): """Post the content of a URL via sending a HTTP POST request. Args: url: A URL. headers: Request headers used by the client. decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type. Returns: The content as a string. """ if kwargs.get('post_data_raw'): logging.debug('post_content: %s\npost_data_raw: %s' % (url, kwargs['post_data_raw'])) else: logging.debug('post_content: %s\npost_data: %s' % (url, post_data)) req = request.Request(url, headers=headers) if cookies: cookies.add_cookie_header(req) req.headers.update(req.unredirected_hdrs) if kwargs.get('post_data_raw'): post_data_enc = bytes(kwargs['post_data_raw'], 'utf-8') else: post_data_enc = bytes(parse.urlencode(post_data), 'utf-8') response = urlopen_with_retry(req, data=post_data_enc) data = response.read() # Handle HTTP compression for gzip and deflate (zlib) content_encoding = response.getheader('Content-Encoding') if content_encoding == 'gzip': data = ungzip(data) elif content_encoding == 'deflate': data = undeflate(data) # Decode the response body if decoded: charset = match1( response.getheader('Content-Type'), r'charset=([\w-]+)' ) if charset is not None: data = data.decode(charset) else: data = data.decode('utf-8') return data
Post the content of a URL via sending a HTTP POST request. Args: url: A URL. headers: Request headers used by the client. decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type. Returns: The content as a string.
post_content
src/you_get/common.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L457-L501
b746ac01c9f39de94cac2d56f665285b0523b974
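A companion sketch for post_content, showing both the form-encoded path and the post_data_raw keyword; the endpoints and payloads are placeholders:

from you_get.common import post_content

# Form-encoded POST: post_data is urlencoded into the request body.
body = post_content('https://example.com/login',   # placeholder URL
                    post_data={'user': 'alice', 'password': 'secret'})

# Raw POST body (e.g. JSON) via the post_data_raw keyword argument.
body = post_content('https://example.com/api',     # placeholder URL
                    headers={'Content-Type': 'application/json'},
                    post_data_raw='{"query": "test"}')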
def parse_host(host): """Parses host name and port number from a string. """ if re.match(r'^(\d+)$', host) is not None: return ("0.0.0.0", int(host)) if re.match(r'^(\w+)://', host) is None: host = "//" + host o = parse.urlparse(host) hostname = o.hostname or "0.0.0.0" port = o.port or 0 return (hostname, port)
[ "def", "parse_host", "(", "host", ")", ":", "if", "re", ".", "match", "(", "r'^(\\d+)$'", ",", "host", ")", "is", "not", "None", ":", "return", "(", "\"0.0.0.0\"", ",", "int", "(", "host", ")", ")", "if", "re", ".", "match", "(", "r'^(\\w+)://'", ",", "host", ")", "is", "None", ":", "host", "=", "\"//\"", "+", "host", "o", "=", "parse", ".", "urlparse", "(", "host", ")", "hostname", "=", "o", ".", "hostname", "or", "\"0.0.0.0\"", "port", "=", "o", ".", "port", "or", "0", "return", "(", "hostname", ",", "port", ")" ]
[ "Parses", "host", "name", "and", "port", "number", "from", "a", "string", "." ]
soimort/you-get
def parse_host(host): """Parses host name and port number from a string. """ if re.match(r'^(\d+)$', host) is not None: return ("0.0.0.0", int(host)) if re.match(r'^(\w+)://', host) is None: host = "//" + host o = parse.urlparse(host) hostname = o.hostname or "0.0.0.0" port = o.port or 0 return (hostname, port)
Parses host name and port number from a string.
parse_host
src/you_get/common.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L1216-L1226
b746ac01c9f39de94cac2d56f665285b0523b974
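The behaviour of parse_host is easiest to see from a few inputs; a runnable sketch (host names are placeholders):

from you_get.common import parse_host

assert parse_host('8080') == ('0.0.0.0', 8080)                          # bare port
assert parse_host('127.0.0.1:3128') == ('127.0.0.1', 3128)              # host:port
assert parse_host('http://proxy.local:8118') == ('proxy.local', 8118)   # with scheme
assert parse_host('proxy.local') == ('proxy.local', 0)                  # port defaults to 0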
def print_more_compatible(*args, **kwargs): import builtins as __builtin__ """Overload default print function as py (<3.3) does not support 'flush' keyword. Although the function name can be same as print to get itself overloaded automatically, I'd rather leave it with a different name and only overload it when importing to make less confusion. """ # nothing happens on py3.3 and later if sys.version_info[:2] >= (3, 3): return __builtin__.print(*args, **kwargs) # in lower pyver (e.g. 3.2.x), remove 'flush' keyword and flush it as requested doFlush = kwargs.pop('flush', False) ret = __builtin__.print(*args, **kwargs) if doFlush: kwargs.get('file', sys.stdout).flush() return ret
[ "def", "print_more_compatible", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "import", "builtins", "as", "__builtin__", "# nothing happens on py3.3 and later", "if", "sys", ".", "version_info", "[", ":", "2", "]", ">=", "(", "3", ",", "3", ")", ":", "return", "__builtin__", ".", "print", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# in lower pyver (e.g. 3.2.x), remove 'flush' keyword and flush it as requested", "doFlush", "=", "kwargs", ".", "pop", "(", "'flush'", ",", "False", ")", "ret", "=", "__builtin__", ".", "print", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "doFlush", ":", "kwargs", ".", "get", "(", "'file'", ",", "sys", ".", "stdout", ")", ".", "flush", "(", ")", "return", "ret" ]
[ "Overload", "default", "print", "function", "as", "py", "(", "<3", ".", "3", ")", "does", "not", "support", "flush", "keyword", ".", "Although", "the", "function", "name", "can", "be", "same", "as", "print", "to", "get", "itself", "overloaded", "automatically", "I", "d", "rather", "leave", "it", "with", "a", "different", "name", "and", "only", "overload", "it", "when", "importing", "to", "make", "less", "confusion", "." ]
soimort/you-get
def print_more_compatible(*args, **kwargs): import builtins as __builtin__ """Overload default print function as py (<3.3) does not support 'flush' keyword. Although the function name can be same as print to get itself overloaded automatically, I'd rather leave it with a different name and only overload it when importing to make less confusion. """ # nothing happens on py3.3 and later if sys.version_info[:2] >= (3, 3): return __builtin__.print(*args, **kwargs) # in lower pyver (e.g. 3.2.x), remove 'flush' keyword and flush it as requested doFlush = kwargs.pop('flush', False) ret = __builtin__.print(*args, **kwargs) if doFlush: kwargs.get('file', sys.stdout).flush() return ret
Overload default print function as py (<3.3) does not support 'flush' keyword. Although the function name can be same as print to get itself overloaded automatically, I'd rather leave it with a different name and only overload it when importing to make less confusion.
print_more_compatible
src/you_get/common.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L1258-L1273
b746ac01c9f39de94cac2d56f665285b0523b974
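A sketch of how the shim is meant to be used — imported under the name print so callers can pass flush= on any Python 3 version (the alias is the caller's choice, not prescribed by the record):

import sys
from you_get.common import print_more_compatible as print

# On Python >= 3.3 this is the plain builtin print(); on 3.2 the 'flush'
# keyword is popped and emulated by flushing the target stream afterwards.
print('downloading...', end='', flush=True)
print(' done', file=sys.stderr, flush=True)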
def showroom_get_roomid_by_room_url_key(room_url_key): """str->str""" fake_headers_mobile = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'UTF-8,*;q=0.5', 'Accept-Encoding': 'gzip,deflate,sdch', 'Accept-Language': 'en-US,en;q=0.8', 'User-Agent': 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36' } webpage_url = 'https://www.showroom-live.com/' + room_url_key html = get_content(webpage_url, headers = fake_headers_mobile) roomid = match1(html, r'room\?room_id\=(\d+)') assert roomid return roomid
[ "def", "showroom_get_roomid_by_room_url_key", "(", "room_url_key", ")", ":", "fake_headers_mobile", "=", "{", "'Accept'", ":", "'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'", ",", "'Accept-Charset'", ":", "'UTF-8,*;q=0.5'", ",", "'Accept-Encoding'", ":", "'gzip,deflate,sdch'", ",", "'Accept-Language'", ":", "'en-US,en;q=0.8'", ",", "'User-Agent'", ":", "'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'", "}", "webpage_url", "=", "'https://www.showroom-live.com/'", "+", "room_url_key", "html", "=", "get_content", "(", "webpage_url", ",", "headers", "=", "fake_headers_mobile", ")", "roomid", "=", "match1", "(", "html", ",", "r'room\\?room_id\\=(\\d+)'", ")", "assert", "roomid", "return", "roomid" ]
[ "str", "-", ">", "str" ]
soimort/you-get
def showroom_get_roomid_by_room_url_key(room_url_key): """str->str""" fake_headers_mobile = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'UTF-8,*;q=0.5', 'Accept-Encoding': 'gzip,deflate,sdch', 'Accept-Language': 'en-US,en;q=0.8', 'User-Agent': 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36' } webpage_url = 'https://www.showroom-live.com/' + room_url_key html = get_content(webpage_url, headers = fake_headers_mobile) roomid = match1(html, r'room\?room_id\=(\d+)') assert roomid return roomid
str->str
showroom_get_roomid_by_room_url_key
src/you_get/extractors/showroom.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/showroom.py#L11-L24
b746ac01c9f39de94cac2d56f665285b0523b974
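Usage sketch; the room_url_key below is a made-up placeholder for the path segment of a Showroom room page:

from you_get.extractors.showroom import showroom_get_roomid_by_room_url_key

# https://www.showroom-live.com/<room_url_key> -> numeric room_id as a string
room_id = showroom_get_roomid_by_room_url_key('SOME_ROOM_KEY')  # placeholder key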
def showroom_download_by_room_id(room_id, output_dir = '.', merge = False, info_only = False, **kwargs): '''Source: Android mobile''' while True: timestamp = str(int(time() * 1000)) api_endpoint = 'https://www.showroom-live.com/api/live/streaming_url?room_id={room_id}&_={timestamp}'.format(room_id = room_id, timestamp = timestamp) html = get_content(api_endpoint) html = json.loads(html) #{'streaming_url_list': [{'url': 'rtmp://52.197.69.198:1935/liveedge', 'id': 1, 'label': 'original spec(low latency)', 'is_default': True, 'type': 'rtmp', 'stream_name': '7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed'}, {'url': 'http://52.197.69.198:1935/liveedge/7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed/playlist.m3u8', 'is_default': True, 'id': 2, 'type': 'hls', 'label': 'original spec'}, {'url': 'rtmp://52.197.69.198:1935/liveedge', 'id': 3, 'label': 'low spec(low latency)', 'is_default': False, 'type': 'rtmp', 'stream_name': '7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed_low'}, {'url': 'http://52.197.69.198:1935/liveedge/7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed_low/playlist.m3u8', 'is_default': False, 'id': 4, 'type': 'hls', 'label': 'low spec'}]} if len(html) >= 1: break log.w('The live show is currently offline.') sleep(1) #This is mainly for testing the M3U FFmpeg parser so I would ignore any non-m3u ones stream_url = [i['url'] for i in html['streaming_url_list'] if i['is_default'] and i['type'] == 'hls'][0] assert stream_url #title title = '' profile_api = 'https://www.showroom-live.com/api/room/profile?room_id={room_id}'.format(room_id = room_id) html = loads(get_content(profile_api)) try: title = html['main_name'] except KeyError: title = 'Showroom_{room_id}'.format(room_id = room_id) type_, ext, size = url_info(stream_url) print_info(site_info, title, type_, size) if not info_only: download_url_ffmpeg(url=stream_url, title=title, ext= 'mp4', output_dir=output_dir)
[ "def", "showroom_download_by_room_id", "(", "room_id", ",", "output_dir", "=", "'.'", ",", "merge", "=", "False", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "while", "True", ":", "timestamp", "=", "str", "(", "int", "(", "time", "(", ")", "*", "1000", ")", ")", "api_endpoint", "=", "'https://www.showroom-live.com/api/live/streaming_url?room_id={room_id}&_={timestamp}'", ".", "format", "(", "room_id", "=", "room_id", ",", "timestamp", "=", "timestamp", ")", "html", "=", "get_content", "(", "api_endpoint", ")", "html", "=", "json", ".", "loads", "(", "html", ")", "#{'streaming_url_list': [{'url': 'rtmp://52.197.69.198:1935/liveedge', 'id': 1, 'label': 'original spec(low latency)', 'is_default': True, 'type': 'rtmp', 'stream_name': '7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed'}, {'url': 'http://52.197.69.198:1935/liveedge/7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed/playlist.m3u8', 'is_default': True, 'id': 2, 'type': 'hls', 'label': 'original spec'}, {'url': 'rtmp://52.197.69.198:1935/liveedge', 'id': 3, 'label': 'low spec(low latency)', 'is_default': False, 'type': 'rtmp', 'stream_name': '7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed_low'}, {'url': 'http://52.197.69.198:1935/liveedge/7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed_low/playlist.m3u8', 'is_default': False, 'id': 4, 'type': 'hls', 'label': 'low spec'}]}", "if", "len", "(", "html", ")", ">=", "1", ":", "break", "log", ".", "w", "(", "'The live show is currently offline.'", ")", "sleep", "(", "1", ")", "#This is mainly for testing the M3U FFmpeg parser so I would ignore any non-m3u ones", "stream_url", "=", "[", "i", "[", "'url'", "]", "for", "i", "in", "html", "[", "'streaming_url_list'", "]", "if", "i", "[", "'is_default'", "]", "and", "i", "[", "'type'", "]", "==", "'hls'", "]", "[", "0", "]", "assert", "stream_url", "#title", "title", "=", "''", "profile_api", "=", "'https://www.showroom-live.com/api/room/profile?room_id={room_id}'", ".", "format", "(", "room_id", "=", "room_id", ")", "html", "=", "loads", "(", "get_content", "(", "profile_api", ")", ")", "try", ":", "title", "=", "html", "[", "'main_name'", "]", "except", "KeyError", ":", "title", "=", "'Showroom_{room_id}'", ".", "format", "(", "room_id", "=", "room_id", ")", "type_", ",", "ext", ",", "size", "=", "url_info", "(", "stream_url", ")", "print_info", "(", "site_info", ",", "title", ",", "type_", ",", "size", ")", "if", "not", "info_only", ":", "download_url_ffmpeg", "(", "url", "=", "stream_url", ",", "title", "=", "title", ",", "ext", "=", "'mp4'", ",", "output_dir", "=", "output_dir", ")" ]
[ "Source", ":", "Android", "mobile" ]
soimort/you-get
def showroom_download_by_room_id(room_id, output_dir = '.', merge = False, info_only = False, **kwargs): '''Source: Android mobile''' while True: timestamp = str(int(time() * 1000)) api_endpoint = 'https://www.showroom-live.com/api/live/streaming_url?room_id={room_id}&_={timestamp}'.format(room_id = room_id, timestamp = timestamp) html = get_content(api_endpoint) html = json.loads(html) #{'streaming_url_list': [{'url': 'rtmp://52.197.69.198:1935/liveedge', 'id': 1, 'label': 'original spec(low latency)', 'is_default': True, 'type': 'rtmp', 'stream_name': '7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed'}, {'url': 'http://52.197.69.198:1935/liveedge/7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed/playlist.m3u8', 'is_default': True, 'id': 2, 'type': 'hls', 'label': 'original spec'}, {'url': 'rtmp://52.197.69.198:1935/liveedge', 'id': 3, 'label': 'low spec(low latency)', 'is_default': False, 'type': 'rtmp', 'stream_name': '7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed_low'}, {'url': 'http://52.197.69.198:1935/liveedge/7656a6d5baa1d77075c971f6d8b6dc61b979fc913dc5fe7cc1318281793436ed_low/playlist.m3u8', 'is_default': False, 'id': 4, 'type': 'hls', 'label': 'low spec'}]} if len(html) >= 1: break log.w('The live show is currently offline.') sleep(1) #This is mainly for testing the M3U FFmpeg parser so I would ignore any non-m3u ones stream_url = [i['url'] for i in html['streaming_url_list'] if i['is_default'] and i['type'] == 'hls'][0] assert stream_url #title title = '' profile_api = 'https://www.showroom-live.com/api/room/profile?room_id={room_id}'.format(room_id = room_id) html = loads(get_content(profile_api)) try: title = html['main_name'] except KeyError: title = 'Showroom_{room_id}'.format(room_id = room_id) type_, ext, size = url_info(stream_url) print_info(site_info, title, type_, size) if not info_only: download_url_ffmpeg(url=stream_url, title=title, ext= 'mp4', output_dir=output_dir)
Source: Android mobile
showroom_download_by_room_id
src/you_get/extractors/showroom.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/showroom.py#L26-L56
b746ac01c9f39de94cac2d56f665285b0523b974
def _wanmen_get_title_by_json_topic_part(json_content, tIndex, pIndex): """JSON, int, int, int->str Get a proper title with courseid+topicID+partID.""" return '_'.join([json_content[0]['name'], json_content[0]['Topics'][tIndex]['name'], json_content[0]['Topics'][tIndex]['Parts'][pIndex]['name']])
[ "def", "_wanmen_get_title_by_json_topic_part", "(", "json_content", ",", "tIndex", ",", "pIndex", ")", ":", "return", "'_'", ".", "join", "(", "[", "json_content", "[", "0", "]", "[", "'name'", "]", ",", "json_content", "[", "0", "]", "[", "'Topics'", "]", "[", "tIndex", "]", "[", "'name'", "]", ",", "json_content", "[", "0", "]", "[", "'Topics'", "]", "[", "tIndex", "]", "[", "'Parts'", "]", "[", "pIndex", "]", "[", "'name'", "]", "]", ")" ]
[ "JSON", "int", "int", "int", "-", ">", "str", "Get", "a", "proper", "title", "with", "courseid", "+", "topicID", "+", "partID", "." ]
soimort/you-get
def _wanmen_get_title_by_json_topic_part(json_content, tIndex, pIndex): """JSON, int, int, int->str Get a proper title with courseid+topicID+partID.""" return '_'.join([json_content[0]['name'], json_content[0]['Topics'][tIndex]['name'], json_content[0]['Topics'][tIndex]['Parts'][pIndex]['name']])
JSON, int, int, int->str Get a proper title with courseid+topicID+partID.
_wanmen_get_title_by_json_topic_part
src/you_get/extractors/wanmen.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/wanmen.py#L18-L25
b746ac01c9f39de94cac2d56f665285b0523b974
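The nested JSON shape the helper indexes into, as a fabricated minimal sample containing only the keys the function actually reads; the same payload drives the course/topic/part download loops in the two records that follow:

from you_get.extractors.wanmen import _wanmen_get_title_by_json_topic_part

json_content = [{
    'name': 'Course',                       # course name
    'Topics': [{
        'name': 'Topic 1',                  # topic at tIndex = 0
        'Parts': [{'name': 'Part A'}],      # part at pIndex = 0
    }],
}]

assert _wanmen_get_title_by_json_topic_part(json_content, 0, 0) == 'Course_Topic 1_Part A'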
def wanmen_download_by_course(json_api_content, output_dir='.', merge=True, info_only=False, **kwargs): """int->None Download a WHOLE course. Reuse the API call to save time.""" for tIndex in range(len(json_api_content[0]['Topics'])): for pIndex in range(len(json_api_content[0]['Topics'][tIndex]['Parts'])): wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
[ "def", "wanmen_download_by_course", "(", "json_api_content", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "for", "tIndex", "in", "range", "(", "len", "(", "json_api_content", "[", "0", "]", "[", "'Topics'", "]", ")", ")", ":", "for", "pIndex", "in", "range", "(", "len", "(", "json_api_content", "[", "0", "]", "[", "'Topics'", "]", "[", "tIndex", "]", "[", "'Parts'", "]", ")", ")", ":", "wanmen_download_by_course_topic_part", "(", "json_api_content", ",", "tIndex", ",", "pIndex", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ",", "*", "*", "kwargs", ")" ]
[ "int", "-", ">", "None", "Download", "a", "WHOLE", "course", ".", "Reuse", "the", "API", "call", "to", "save", "time", "." ]
soimort/you-get
def wanmen_download_by_course(json_api_content, output_dir='.', merge=True, info_only=False, **kwargs): """int->None Download a WHOLE course. Reuse the API call to save time.""" for tIndex in range(len(json_api_content[0]['Topics'])): for pIndex in range(len(json_api_content[0]['Topics'][tIndex]['Parts'])): wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
int->None Download a WHOLE course. Reuse the API call to save time.
wanmen_download_by_course
src/you_get/extractors/wanmen.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/wanmen.py#L37-L51
b746ac01c9f39de94cac2d56f665285b0523b974
def wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex, output_dir='.', merge=True, info_only=False, **kwargs): """int, int, int->None Download ONE PART of the course.""" html = json_api_content title = _wanmen_get_title_by_json_topic_part(html, tIndex, pIndex) bokeccID = _wanmen_get_boke_id_by_json_topic_part(html, tIndex, pIndex) bokecc_download_by_id(vid = bokeccID, title = title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
[ "def", "wanmen_download_by_course_topic_part", "(", "json_api_content", ",", "tIndex", ",", "pIndex", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "html", "=", "json_api_content", "title", "=", "_wanmen_get_title_by_json_topic_part", "(", "html", ",", "tIndex", ",", "pIndex", ")", "bokeccID", "=", "_wanmen_get_boke_id_by_json_topic_part", "(", "html", ",", "tIndex", ",", "pIndex", ")", "bokecc_download_by_id", "(", "vid", "=", "bokeccID", ",", "title", "=", "title", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ",", "*", "*", "kwargs", ")" ]
[ "int", "int", "int", "-", ">", "None", "Download", "ONE", "PART", "of", "the", "course", "." ]
soimort/you-get
def wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex, output_dir='.', merge=True, info_only=False, **kwargs): """int, int, int->None Download ONE PART of the course.""" html = json_api_content title = _wanmen_get_title_by_json_topic_part(html, tIndex, pIndex) bokeccID = _wanmen_get_boke_id_by_json_topic_part(html, tIndex, pIndex) bokecc_download_by_id(vid = bokeccID, title = title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
int, int, int->None Download ONE PART of the course.
wanmen_download_by_course_topic_part
src/you_get/extractors/wanmen.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/wanmen.py#L69-L84
b746ac01c9f39de94cac2d56f665285b0523b974
def get_streams_by_id(account_number, video_id): """ int, int->list Get the height of the videos. Since brightcove is using 3 kinds of links: rtmp, http and https, we will be using the HTTPS one to make it secure. If somehow akamaihd.net is blocked by the Great Fucking Wall, change the "startswith https" to http. """ endpoint = 'https://edge.api.brightcove.com/playback/v1/accounts/{account_number}/videos/{video_id}'.format(account_number = account_number, video_id = video_id) fake_header_id = fake_headers #is this somehow related to the time? Magic.... fake_header_id['Accept'] ='application/json;pk=BCpkADawqM1cc6wmJQC2tvoXZt4mrB7bFfi6zGt9QnOzprPZcGLE9OMGJwspQwKfuFYuCjAAJ53JdjI8zGFx1ll4rxhYJ255AXH1BQ10rnm34weknpfG-sippyQ' html = get_content(endpoint, headers= fake_header_id) html_json = json.loads(html) link_list = [] for i in html_json['sources']: if 'src' in i: #to avoid KeyError if i['src'].startswith('https'): link_list.append((str(i['height']), i['src'])) return link_list
[ "def", "get_streams_by_id", "(", "account_number", ",", "video_id", ")", ":", "endpoint", "=", "'https://edge.api.brightcove.com/playback/v1/accounts/{account_number}/videos/{video_id}'", ".", "format", "(", "account_number", "=", "account_number", ",", "video_id", "=", "video_id", ")", "fake_header_id", "=", "fake_headers", "#is this somehow related to the time? Magic....", "fake_header_id", "[", "'Accept'", "]", "=", "'application/json;pk=BCpkADawqM1cc6wmJQC2tvoXZt4mrB7bFfi6zGt9QnOzprPZcGLE9OMGJwspQwKfuFYuCjAAJ53JdjI8zGFx1ll4rxhYJ255AXH1BQ10rnm34weknpfG-sippyQ'", "html", "=", "get_content", "(", "endpoint", ",", "headers", "=", "fake_header_id", ")", "html_json", "=", "json", ".", "loads", "(", "html", ")", "link_list", "=", "[", "]", "for", "i", "in", "html_json", "[", "'sources'", "]", ":", "if", "'src'", "in", "i", ":", "#to avoid KeyError", "if", "i", "[", "'src'", "]", ".", "startswith", "(", "'https'", ")", ":", "link_list", ".", "append", "(", "(", "str", "(", "i", "[", "'height'", "]", ")", ",", "i", "[", "'src'", "]", ")", ")", "return", "link_list" ]
[ "int", "int", "-", ">", "list", "Get", "the", "height", "of", "the", "videos", ".", "Since", "brightcove", "is", "using", "3", "kinds", "of", "links", ":", "rtmp", "http", "and", "https", "we", "will", "be", "using", "the", "HTTPS", "one", "to", "make", "it", "secure", ".", "If", "somehow", "akamaihd", ".", "net", "is", "blocked", "by", "the", "Great", "Fucking", "Wall", "change", "the", "startswith", "https", "to", "http", "." ]
soimort/you-get
def get_streams_by_id(account_number, video_id): """ int, int->list Get the height of the videos. Since brightcove is using 3 kinds of links: rtmp, http and https, we will be using the HTTPS one to make it secure. If somehow akamaihd.net is blocked by the Great Fucking Wall, change the "startswith https" to http. """ endpoint = 'https://edge.api.brightcove.com/playback/v1/accounts/{account_number}/videos/{video_id}'.format(account_number = account_number, video_id = video_id) fake_header_id = fake_headers #is this somehow related to the time? Magic.... fake_header_id['Accept'] ='application/json;pk=BCpkADawqM1cc6wmJQC2tvoXZt4mrB7bFfi6zGt9QnOzprPZcGLE9OMGJwspQwKfuFYuCjAAJ53JdjI8zGFx1ll4rxhYJ255AXH1BQ10rnm34weknpfG-sippyQ' html = get_content(endpoint, headers= fake_header_id) html_json = json.loads(html) link_list = [] for i in html_json['sources']: if 'src' in i: #to avoid KeyError if i['src'].startswith('https'): link_list.append((str(i['height']), i['src'])) return link_list
int, int->list Get the height of the videos. Since brightcove is using 3 kinds of links: rtmp, http and https, we will be using the HTTPS one to make it secure. If somehow akamaihd.net is blocked by the Great Fucking Wall, change the "startswith https" to http.
Bigthink.get_streams_by_id
src/you_get/extractors/bigthink.py
test
python
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/bigthink.py#L22-L49
b746ac01c9f39de94cac2d56f665285b0523b974
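Post-processing sketch for the (height, url) pairs returned above; the sample data is fabricated for illustration:

# Pick the tallest available HTTPS stream from the returned pairs.
link_list = [('360', 'https://cdn.example.net/360.m3u8'),
             ('720', 'https://cdn.example.net/720.m3u8')]
height, url = max(link_list, key=lambda pair: int(pair[0]))
assert (height, url) == ('720', 'https://cdn.example.net/720.m3u8')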
def has_task(self, task_instance): """ Checks if a task is either queued or running in this executor :param task_instance: TaskInstance :return: True if the task is known to this executor """ if task_instance.key in self.queued_tasks or task_instance.key in self.running: return True
[ "def", "has_task", "(", "self", ",", "task_instance", ")", ":", "if", "task_instance", ".", "key", "in", "self", ".", "queued_tasks", "or", "task_instance", ".", "key", "in", "self", ".", "running", ":", "return", "True" ]
[ "Checks", "if", "a", "task", "is", "either", "queued", "or", "running", "in", "this", "executor" ]
apache/airflow
def has_task(self, task_instance): """ Checks if a task is either queued or running in this executor :param task_instance: TaskInstance :return: True if the task is known to this executor """ if task_instance.key in self.queued_tasks or task_instance.key in self.running: return True
Checks if a task is either queued or running in this executor :param task_instance: TaskInstance :return: True if the task is known to this executor
BaseExecutor.has_task
airflow/executors/base_executor.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/executors/base_executor.py#L97-L105
b69c686ad8a0c89b9136bb4b31767257eb7b2597
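A self-contained sketch; FakeTI is a stand-in for a real TaskInstance, since has_task only touches its .key attribute. Note the method returns True or, implicitly, None — both behave correctly in a truthiness test:

from collections import namedtuple
from airflow.executors.base_executor import BaseExecutor

FakeTI = namedtuple('FakeTI', 'key')                    # stand-in for TaskInstance
ti = FakeTI(key=('my_dag', 'my_task', '2019-01-01', 1))

executor = BaseExecutor()
executor.queued_tasks[ti.key] = None                    # pretend it was queued
assert executor.has_task(ti)                            # key found -> True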
def get_event_buffer(self, dag_ids=None): """ Returns and flush the event buffer. In case dag_ids is specified it will only return and flush events for the given dag_ids. Otherwise it returns and flushes all :param dag_ids: to dag_ids to return events for, if None returns all :return: a dict of events """ cleared_events = dict() if dag_ids is None: cleared_events = self.event_buffer self.event_buffer = dict() else: for key in list(self.event_buffer.keys()): dag_id, _, _, _ = key if dag_id in dag_ids: cleared_events[key] = self.event_buffer.pop(key) return cleared_events
[ "def", "get_event_buffer", "(", "self", ",", "dag_ids", "=", "None", ")", ":", "cleared_events", "=", "dict", "(", ")", "if", "dag_ids", "is", "None", ":", "cleared_events", "=", "self", ".", "event_buffer", "self", ".", "event_buffer", "=", "dict", "(", ")", "else", ":", "for", "key", "in", "list", "(", "self", ".", "event_buffer", ".", "keys", "(", ")", ")", ":", "dag_id", ",", "_", ",", "_", ",", "_", "=", "key", "if", "dag_id", "in", "dag_ids", ":", "cleared_events", "[", "key", "]", "=", "self", ".", "event_buffer", ".", "pop", "(", "key", ")", "return", "cleared_events" ]
[ "Returns", "and", "flush", "the", "event", "buffer", ".", "In", "case", "dag_ids", "is", "specified", "it", "will", "only", "return", "and", "flush", "events", "for", "the", "given", "dag_ids", ".", "Otherwise", "it", "returns", "and", "flushes", "all" ]
apache/airflow
def get_event_buffer(self, dag_ids=None): """ Returns and flush the event buffer. In case dag_ids is specified it will only return and flush events for the given dag_ids. Otherwise it returns and flushes all :param dag_ids: to dag_ids to return events for, if None returns all :return: a dict of events """ cleared_events = dict() if dag_ids is None: cleared_events = self.event_buffer self.event_buffer = dict() else: for key in list(self.event_buffer.keys()): dag_id, _, _, _ = key if dag_id in dag_ids: cleared_events[key] = self.event_buffer.pop(key) return cleared_events
Returns and flush the event buffer. In case dag_ids is specified it will only return and flush events for the given dag_ids. Otherwise it returns and flushes all :param dag_ids: to dag_ids to return events for, if None returns all :return: a dict of events
BaseExecutor.get_event_buffer
airflow/executors/base_executor.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/executors/base_executor.py#L160-L179
b69c686ad8a0c89b9136bb4b31767257eb7b2597
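A sketch of the per-dag filtering; the tuple layout (dag_id, task_id, execution_date, try_number) is inferred from the unpacking inside the method, and the values are fabricated:

from airflow.executors.base_executor import BaseExecutor

executor = BaseExecutor()
executor.event_buffer[('dag_a', 'task_1', '2019-01-01', 1)] = 'success'
executor.event_buffer[('dag_b', 'task_9', '2019-01-01', 1)] = 'failed'

events = executor.get_event_buffer(dag_ids=['dag_a'])    # pops only dag_a events
assert list(events) == [('dag_a', 'task_1', '2019-01-01', 1)]
assert ('dag_b', 'task_9', '2019-01-01', 1) in executor.event_buffer  # untouched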
def _get_conn_params(self): """ one method to fetch connection params as a dict used in get_uri() and get_connection() """ conn = self.get_connection(self.snowflake_conn_id) account = conn.extra_dejson.get('account', None) warehouse = conn.extra_dejson.get('warehouse', None) database = conn.extra_dejson.get('database', None) region = conn.extra_dejson.get("region", None) role = conn.extra_dejson.get('role', None) conn_config = { "user": conn.login, "password": conn.password or '', "schema": conn.schema or '', "database": self.database or database or '', "account": self.account or account or '', "warehouse": self.warehouse or warehouse or '', "region": self.region or region or '', "role": self.role or role or '', } """ If private_key_file is specified in the extra json, load the contents of the file as a private key and specify that in the connection configuration. The connection password then becomes the passphrase for the private key. If your private key file is not encrypted (not recommended), then leave the password empty. """ private_key_file = conn.extra_dejson.get('private_key_file', None) if private_key_file: with open(private_key_file, "rb") as key: passphrase = None if conn.password: passphrase = conn.password.strip().encode() p_key = serialization.load_pem_private_key( key.read(), password=passphrase, backend=default_backend() ) pkb = p_key.private_bytes(encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption()) conn_config['private_key'] = pkb conn_config.pop('password', None) return conn_config
[ "def", "_get_conn_params", "(", "self", ")", ":", "conn", "=", "self", ".", "get_connection", "(", "self", ".", "snowflake_conn_id", ")", "account", "=", "conn", ".", "extra_dejson", ".", "get", "(", "'account'", ",", "None", ")", "warehouse", "=", "conn", ".", "extra_dejson", ".", "get", "(", "'warehouse'", ",", "None", ")", "database", "=", "conn", ".", "extra_dejson", ".", "get", "(", "'database'", ",", "None", ")", "region", "=", "conn", ".", "extra_dejson", ".", "get", "(", "\"region\"", ",", "None", ")", "role", "=", "conn", ".", "extra_dejson", ".", "get", "(", "'role'", ",", "None", ")", "conn_config", "=", "{", "\"user\"", ":", "conn", ".", "login", ",", "\"password\"", ":", "conn", ".", "password", "or", "''", ",", "\"schema\"", ":", "conn", ".", "schema", "or", "''", ",", "\"database\"", ":", "self", ".", "database", "or", "database", "or", "''", ",", "\"account\"", ":", "self", ".", "account", "or", "account", "or", "''", ",", "\"warehouse\"", ":", "self", ".", "warehouse", "or", "warehouse", "or", "''", ",", "\"region\"", ":", "self", ".", "region", "or", "region", "or", "''", ",", "\"role\"", ":", "self", ".", "role", "or", "role", "or", "''", ",", "}", "\"\"\"\n If private_key_file is specified in the extra json, load the contents of the file as a private\n key and specify that in the connection configuration. The connection password then becomes the\n passphrase for the private key. If your private key file is not encrypted (not recommended), then\n leave the password empty.\n \"\"\"", "private_key_file", "=", "conn", ".", "extra_dejson", ".", "get", "(", "'private_key_file'", ",", "None", ")", "if", "private_key_file", ":", "with", "open", "(", "private_key_file", ",", "\"rb\"", ")", "as", "key", ":", "passphrase", "=", "None", "if", "conn", ".", "password", ":", "passphrase", "=", "conn", ".", "password", ".", "strip", "(", ")", ".", "encode", "(", ")", "p_key", "=", "serialization", ".", "load_pem_private_key", "(", "key", ".", "read", "(", ")", ",", "password", "=", "passphrase", ",", "backend", "=", "default_backend", "(", ")", ")", "pkb", "=", "p_key", ".", "private_bytes", "(", "encoding", "=", "serialization", ".", "Encoding", ".", "DER", ",", "format", "=", "serialization", ".", "PrivateFormat", ".", "PKCS8", ",", "encryption_algorithm", "=", "serialization", ".", "NoEncryption", "(", ")", ")", "conn_config", "[", "'private_key'", "]", "=", "pkb", "conn_config", ".", "pop", "(", "'password'", ",", "None", ")", "return", "conn_config" ]
[ "one", "method", "to", "fetch", "connection", "params", "as", "a", "dict", "used", "in", "get_uri", "()", "and", "get_connection", "()" ]
apache/airflow
def _get_conn_params(self): """ one method to fetch connection params as a dict used in get_uri() and get_connection() """ conn = self.get_connection(self.snowflake_conn_id) account = conn.extra_dejson.get('account', None) warehouse = conn.extra_dejson.get('warehouse', None) database = conn.extra_dejson.get('database', None) region = conn.extra_dejson.get("region", None) role = conn.extra_dejson.get('role', None) conn_config = { "user": conn.login, "password": conn.password or '', "schema": conn.schema or '', "database": self.database or database or '', "account": self.account or account or '', "warehouse": self.warehouse or warehouse or '', "region": self.region or region or '', "role": self.role or role or '', } """ If private_key_file is specified in the extra json, load the contents of the file as a private key and specify that in the connection configuration. The connection password then becomes the passphrase for the private key. If your private key file is not encrypted (not recommended), then leave the password empty. """ private_key_file = conn.extra_dejson.get('private_key_file', None) if private_key_file: with open(private_key_file, "rb") as key: passphrase = None if conn.password: passphrase = conn.password.strip().encode() p_key = serialization.load_pem_private_key( key.read(), password=passphrase, backend=default_backend() ) pkb = p_key.private_bytes(encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption()) conn_config['private_key'] = pkb conn_config.pop('password', None) return conn_config
one method to fetch connection params as a dict used in get_uri() and get_connection()
SnowflakeHook._get_conn_params
airflow/contrib/hooks/snowflake_hook.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/snowflake_hook.py#L47-L96
b69c686ad8a0c89b9136bb4b31767257eb7b2597
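The connection Extra JSON the hook reads, sketched as a Python dict with placeholder values; the keys are exactly the ones _get_conn_params() looks up, and hook attributes such as self.database take precedence over them:

# Stored as the Extra field of the 'snowflake_default' connection.
extras = {
    "account": "xy12345.eu-west-1",     # placeholder account locator
    "warehouse": "COMPUTE_WH",
    "database": "ANALYTICS",
    "region": "eu-west-1",
    "role": "LOADER",
    # Optional: switch to key-pair auth; conn.password then becomes the
    # private key's passphrase and is dropped from the connect() kwargs.
    "private_key_file": "/keys/rsa_key.p8",
}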
def get_uri(self): """ override DbApiHook get_uri method for get_sqlalchemy_engine() """ conn_config = self._get_conn_params() uri = 'snowflake://{user}:{password}@{account}/{database}/' uri += '{schema}?warehouse={warehouse}&role={role}' return uri.format(**conn_config)
[ "def", "get_uri", "(", "self", ")", ":", "conn_config", "=", "self", ".", "_get_conn_params", "(", ")", "uri", "=", "'snowflake://{user}:{password}@{account}/{database}/'", "uri", "+=", "'{schema}?warehouse={warehouse}&role={role}'", "return", "uri", ".", "format", "(", "*", "*", "conn_config", ")" ]
[ "override", "DbApiHook", "get_uri", "method", "for", "get_sqlalchemy_engine", "()" ]
apache/airflow
def get_uri(self): """ override DbApiHook get_uri method for get_sqlalchemy_engine() """ conn_config = self._get_conn_params() uri = 'snowflake://{user}:{password}@{account}/{database}/' uri += '{schema}?warehouse={warehouse}&role={role}' return uri.format(**conn_config)
override DbApiHook get_uri method for get_sqlalchemy_engine()
SnowflakeHook.get_uri
airflow/contrib/hooks/snowflake_hook.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/snowflake_hook.py#L98-L105
b69c686ad8a0c89b9136bb4b31767257eb7b2597
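With a connection configured as above, the rendered URI looks roughly like the comment below (credentials redacted, values placeholders); note that region is not part of this template:

from airflow.contrib.hooks.snowflake_hook import SnowflakeHook

hook = SnowflakeHook(snowflake_conn_id='snowflake_default')
print(hook.get_uri())
# snowflake://LOGIN:PASSWORD@xy12345.eu-west-1/ANALYTICS/PUBLIC?warehouse=COMPUTE_WH&role=LOADER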
def get_conn(self): """ Returns a snowflake.connection object """ conn_config = self._get_conn_params() conn = snowflake.connector.connect(**conn_config) return conn
[ "def", "get_conn", "(", "self", ")", ":", "conn_config", "=", "self", ".", "_get_conn_params", "(", ")", "conn", "=", "snowflake", ".", "connector", ".", "connect", "(", "*", "*", "conn_config", ")", "return", "conn" ]
[ "Returns", "a", "snowflake", ".", "connection", "object" ]
apache/airflow
def get_conn(self): """ Returns a snowflake.connection object """ conn_config = self._get_conn_params() conn = snowflake.connector.connect(**conn_config) return conn
Returns a snowflake.connection object
SnowflakeHook.get_conn
airflow/contrib/hooks/snowflake_hook.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/snowflake_hook.py#L107-L113
b69c686ad8a0c89b9136bb4b31767257eb7b2597
def _get_aws_credentials(self): """ returns aws_access_key_id, aws_secret_access_key from extra intended to be used by external import and export statements """ if self.snowflake_conn_id: connection_object = self.get_connection(self.snowflake_conn_id) if 'aws_secret_access_key' in connection_object.extra_dejson: aws_access_key_id = connection_object.extra_dejson.get( 'aws_access_key_id') aws_secret_access_key = connection_object.extra_dejson.get( 'aws_secret_access_key') return aws_access_key_id, aws_secret_access_key
[ "def", "_get_aws_credentials", "(", "self", ")", ":", "if", "self", ".", "snowflake_conn_id", ":", "connection_object", "=", "self", ".", "get_connection", "(", "self", ".", "snowflake_conn_id", ")", "if", "'aws_secret_access_key'", "in", "connection_object", ".", "extra_dejson", ":", "aws_access_key_id", "=", "connection_object", ".", "extra_dejson", ".", "get", "(", "'aws_access_key_id'", ")", "aws_secret_access_key", "=", "connection_object", ".", "extra_dejson", ".", "get", "(", "'aws_secret_access_key'", ")", "return", "aws_access_key_id", ",", "aws_secret_access_key" ]
[ "returns", "aws_access_key_id", "aws_secret_access_key", "from", "extra" ]
apache/airflow
def _get_aws_credentials(self): """ returns aws_access_key_id, aws_secret_access_key from extra intended to be used by external import and export statements """ if self.snowflake_conn_id: connection_object = self.get_connection(self.snowflake_conn_id) if 'aws_secret_access_key' in connection_object.extra_dejson: aws_access_key_id = connection_object.extra_dejson.get( 'aws_access_key_id') aws_secret_access_key = connection_object.extra_dejson.get( 'aws_secret_access_key') return aws_access_key_id, aws_secret_access_key
returns aws_access_key_id, aws_secret_access_key from extra intended to be used by external import and export statements
SnowflakeHook._get_aws_credentials
airflow/contrib/hooks/snowflake_hook.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/snowflake_hook.py#L115-L129
b69c686ad8a0c89b9136bb4b31767257eb7b2597
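The two extra keys this method reads, again as a placeholder sketch of the connection's Extra JSON:

extras = {
    "aws_access_key_id": "PLACEHOLDER_KEY_ID",
    "aws_secret_access_key": "PLACEHOLDER_SECRET",
}
# hook._get_aws_credentials() -> ('PLACEHOLDER_KEY_ID', 'PLACEHOLDER_SECRET')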
def _get_field(self, field_name, default=None): """ Fetches a field from extras, and returns it. This is some Airflow magic. The grpc hook type adds custom UI elements to the hook page, which allow admins to specify scopes, credential pem files, etc. They get formatted as shown below. """ full_field_name = 'extra__grpc__{}'.format(field_name) if full_field_name in self.extras: return self.extras[full_field_name] else: return default
[ "def", "_get_field", "(", "self", ",", "field_name", ",", "default", "=", "None", ")", ":", "full_field_name", "=", "'extra__grpc__{}'", ".", "format", "(", "field_name", ")", "if", "full_field_name", "in", "self", ".", "extras", ":", "return", "self", ".", "extras", "[", "full_field_name", "]", "else", ":", "return", "default" ]
[ "Fetches", "a", "field", "from", "extras", "and", "returns", "it", ".", "This", "is", "some", "Airflow", "magic", ".", "The", "grpc", "hook", "type", "adds", "custom", "UI", "elements", "to", "the", "hook", "page", "which", "allow", "admins", "to", "specify", "scopes", "credential", "pem", "files", "etc", ".", "They", "get", "formatted", "as", "shown", "below", "." ]
apache/airflow
def _get_field(self, field_name, default=None): """ Fetches a field from extras, and returns it. This is some Airflow magic. The grpc hook type adds custom UI elements to the hook page, which allow admins to specify scopes, credential pem files, etc. They get formatted as shown below. """ full_field_name = 'extra__grpc__{}'.format(field_name) if full_field_name in self.extras: return self.extras[full_field_name] else: return default
Fetches a field from extras, and returns it. This is some Airflow magic. The grpc hook type adds custom UI elements to the hook page, which allow admins to specify scopes, credential pem files, etc. They get formatted as shown below.
GrpcHook._get_field
airflow/contrib/hooks/grpc_hook.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/grpc_hook.py#L112-L123
b69c686ad8a0c89b9136bb4b31767257eb7b2597
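How the prefixing works in practice; the field names below are illustrative values of the kind the gRPC connection form would store:

# Form inputs land in the Extra JSON under an 'extra__grpc__' prefix,
# which _get_field() prepends transparently when looking a field up.
extras = {
    "extra__grpc__auth_type": "NO_AUTH",          # illustrative field
    "extra__grpc__scopes": "scope1,scope2",
}
# hook._get_field('auth_type')            -> 'NO_AUTH'
# hook._get_field('missing', default='x') -> 'x'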
def copy_expert(self, sql, filename, open=open): """ Executes SQL using psycopg2 copy_expert method. Necessary to execute COPY command without access to a superuser. Note: if this method is called with a "COPY FROM" statement and the specified input file does not exist, it creates an empty file and no data is loaded, but the operation succeeds. So if users want to be aware when the input file does not exist, they have to check its existence by themselves. """ if not os.path.isfile(filename): with open(filename, 'w'): pass with open(filename, 'r+') as f: with closing(self.get_conn()) as conn: with closing(conn.cursor()) as cur: cur.copy_expert(sql, f) f.truncate(f.tell()) conn.commit()
[ "def", "copy_expert", "(", "self", ",", "sql", ",", "filename", ",", "open", "=", "open", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", ":", "pass", "with", "open", "(", "filename", ",", "'r+'", ")", "as", "f", ":", "with", "closing", "(", "self", ".", "get_conn", "(", ")", ")", "as", "conn", ":", "with", "closing", "(", "conn", ".", "cursor", "(", ")", ")", "as", "cur", ":", "cur", ".", "copy_expert", "(", "sql", ",", "f", ")", "f", ".", "truncate", "(", "f", ".", "tell", "(", ")", ")", "conn", ".", "commit", "(", ")" ]
[ "Executes", "SQL", "using", "psycopg2", "copy_expert", "method", ".", "Necessary", "to", "execute", "COPY", "command", "without", "access", "to", "a", "superuser", "." ]
apache/airflow
def copy_expert(self, sql, filename, open=open): """ Executes SQL using psycopg2 copy_expert method. Necessary to execute COPY command without access to a superuser. Note: if this method is called with a "COPY FROM" statement and the specified input file does not exist, it creates an empty file and no data is loaded, but the operation succeeds. So if users want to be aware when the input file does not exist, they have to check its existence by themselves. """ if not os.path.isfile(filename): with open(filename, 'w'): pass with open(filename, 'r+') as f: with closing(self.get_conn()) as conn: with closing(conn.cursor()) as cur: cur.copy_expert(sql, f) f.truncate(f.tell()) conn.commit()
Executes SQL using psycopg2 copy_expert method. Necessary to execute COPY command without access to a superuser. Note: if this method is called with a "COPY FROM" statement and the specified input file does not exist, it creates an empty file and no data is loaded, but the operation succeeds. So if users want to be aware when the input file does not exist, they have to check its existence by themselves.
PostgresHook.copy_expert
airflow/hooks/postgres_hook.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/postgres_hook.py#L63-L83
b69c686ad8a0c89b9136bb4b31767257eb7b2597
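Because copy_expert passes the SQL through unchanged, any COPY option works — e.g. CSV with a header row, which the fixed tab-delimited bulk_dump/bulk_load below cannot produce. Table name and path are placeholders:

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='postgres_default')
hook.copy_expert("COPY my_table TO STDOUT WITH CSV HEADER", '/tmp/my_table.csv')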
def bulk_load(self, table, tmp_file): """ Loads a tab-delimited file into a database table """ self.copy_expert("COPY {table} FROM STDIN".format(table=table), tmp_file)
[ "def", "bulk_load", "(", "self", ",", "table", ",", "tmp_file", ")", ":", "self", ".", "copy_expert", "(", "\"COPY {table} FROM STDIN\"", ".", "format", "(", "table", "=", "table", ")", ",", "tmp_file", ")" ]
[ "Loads", "a", "tab", "-", "delimited", "file", "into", "a", "database", "table" ]
apache/airflow
def bulk_load(self, table, tmp_file): """ Loads a tab-delimited file into a database table """ self.copy_expert("COPY {table} FROM STDIN".format(table=table), tmp_file)
Loads a tab-delimited file into a database table
PostgresHook.bulk_load
airflow/hooks/postgres_hook.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/postgres_hook.py#L85-L89
b69c686ad8a0c89b9136bb4b31767257eb7b2597
def bulk_dump(self, table, tmp_file): """ Dumps a database table into a tab-delimited file """ self.copy_expert("COPY {table} TO STDOUT".format(table=table), tmp_file)
[ "def", "bulk_dump", "(", "self", ",", "table", ",", "tmp_file", ")", ":", "self", ".", "copy_expert", "(", "\"COPY {table} TO STDOUT\"", ".", "format", "(", "table", "=", "table", ")", ",", "tmp_file", ")" ]
[ "Dumps", "a", "database", "table", "into", "a", "tab", "-", "delimited", "file" ]
apache/airflow
def bulk_dump(self, table, tmp_file): """ Dumps a database table into a tab-delimited file """ self.copy_expert("COPY {table} TO STDOUT".format(table=table), tmp_file)
Dumps a database table into a tab-delimited file
PostgresHook.bulk_dump
airflow/hooks/postgres_hook.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/postgres_hook.py#L91-L95
b69c686ad8a0c89b9136bb4b31767257eb7b2597
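A round-trip sketch tying bulk_dump and bulk_load together (placeholder tables and path; the target table must already exist):

from airflow.hooks.postgres_hook import PostgresHook

hook = PostgresHook(postgres_conn_id='postgres_default')
hook.bulk_dump('source_table', '/tmp/rows.tsv')   # table -> tab-delimited file
hook.bulk_load('target_table', '/tmp/rows.tsv')   # file  -> table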
def execute(self, context): """ Uploads the file to Google cloud storage """ hook = GoogleCloudStorageHook( google_cloud_storage_conn_id=self.google_cloud_storage_conn_id, delegate_to=self.delegate_to) hook.upload( bucket_name=self.bucket, object_name=self.dst, mime_type=self.mime_type, filename=self.src, gzip=self.gzip, )
[ "def", "execute", "(", "self", ",", "context", ")", ":", "hook", "=", "GoogleCloudStorageHook", "(", "google_cloud_storage_conn_id", "=", "self", ".", "google_cloud_storage_conn_id", ",", "delegate_to", "=", "self", ".", "delegate_to", ")", "hook", ".", "upload", "(", "bucket_name", "=", "self", ".", "bucket", ",", "object_name", "=", "self", ".", "dst", ",", "mime_type", "=", "self", ".", "mime_type", ",", "filename", "=", "self", ".", "src", ",", "gzip", "=", "self", ".", "gzip", ",", ")" ]
[ "Uploads", "the", "file", "to", "Google", "cloud", "storage" ]
apache/airflow
def execute(self, context): """ Uploads the file to Google cloud storage """ hook = GoogleCloudStorageHook( google_cloud_storage_conn_id=self.google_cloud_storage_conn_id, delegate_to=self.delegate_to) hook.upload( bucket_name=self.bucket, object_name=self.dst, mime_type=self.mime_type, filename=self.src, gzip=self.gzip, )
Uploads the file to Google cloud storage
FileToGoogleCloudStorageOperator.execute
airflow/contrib/operators/file_to_gcs.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/file_to_gcs.py#L68-L82
b69c686ad8a0c89b9136bb4b31767257eb7b2597
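A DAG-level usage sketch for the operator; the dag id, bucket, and paths are placeholders:

from datetime import datetime
from airflow import DAG
from airflow.contrib.operators.file_to_gcs import FileToGoogleCloudStorageOperator

dag = DAG('upload_example', start_date=datetime(2019, 1, 1), schedule_interval=None)

upload = FileToGoogleCloudStorageOperator(
    task_id='upload_report',
    src='/data/report.csv',        # local file to upload
    dst='reports/report.csv',      # object name inside the bucket
    bucket='my-bucket',            # placeholder bucket
    google_cloud_storage_conn_id='google_cloud_default',
    dag=dag,
)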
def max_partition( table, schema="default", field=None, filter_map=None, metastore_conn_id='metastore_default'): """ Gets the max partition for a table. :param schema: The hive schema the table lives in :type schema: str :param table: The hive table you are interested in, supports the dot notation as in "my_database.my_table", if a dot is found, the schema param is disregarded :type table: str :param metastore_conn_id: The hive connection you are interested in. If your default is set you don't need to use this parameter. :type metastore_conn_id: str :param filter_map: partition_key:partition_value map used for partition filtering, e.g. {'key1': 'value1', 'key2': 'value2'}. Only partitions matching all partition_key:partition_value pairs will be considered as candidates of max partition. :type filter_map: map :param field: the field to get the max value from. If there's only one partition field, this will be inferred :type field: str >>> max_partition('airflow.static_babynames_partitioned') '2015-01-01' """ from airflow.hooks.hive_hooks import HiveMetastoreHook if '.' in table: schema, table = table.split('.') hh = HiveMetastoreHook(metastore_conn_id=metastore_conn_id) return hh.max_partition( schema=schema, table_name=table, field=field, filter_map=filter_map)
[ "def", "max_partition", "(", "table", ",", "schema", "=", "\"default\"", ",", "field", "=", "None", ",", "filter_map", "=", "None", ",", "metastore_conn_id", "=", "'metastore_default'", ")", ":", "from", "airflow", ".", "hooks", ".", "hive_hooks", "import", "HiveMetastoreHook", "if", "'.'", "in", "table", ":", "schema", ",", "table", "=", "table", ".", "split", "(", "'.'", ")", "hh", "=", "HiveMetastoreHook", "(", "metastore_conn_id", "=", "metastore_conn_id", ")", "return", "hh", ".", "max_partition", "(", "schema", "=", "schema", ",", "table_name", "=", "table", ",", "field", "=", "field", ",", "filter_map", "=", "filter_map", ")" ]
[ "Gets", "the", "max", "partition", "for", "a", "table", "." ]
apache/airflow
def max_partition( table, schema="default", field=None, filter_map=None, metastore_conn_id='metastore_default'): """ Gets the max partition for a table. :param schema: The hive schema the table lives in :type schema: str :param table: The hive table you are interested in, supports the dot notation as in "my_database.my_table", if a dot is found, the schema param is disregarded :type table: str :param metastore_conn_id: The hive connection you are interested in. If your default is set you don't need to use this parameter. :type metastore_conn_id: str :param filter_map: partition_key:partition_value map used for partition filtering, e.g. {'key1': 'value1', 'key2': 'value2'}. Only partitions matching all partition_key:partition_value pairs will be considered as candidates of max partition. :type filter_map: map :param field: the field to get the max value from. If there's only one partition field, this will be inferred :type field: str >>> max_partition('airflow.static_babynames_partitioned') '2015-01-01' """ from airflow.hooks.hive_hooks import HiveMetastoreHook if '.' in table: schema, table = table.split('.') hh = HiveMetastoreHook(metastore_conn_id=metastore_conn_id) return hh.max_partition( schema=schema, table_name=table, field=field, filter_map=filter_map)
Gets the max partition for a table. :param schema: The hive schema the table lives in :type schema: str :param table: The hive table you are interested in, supports the dot notation as in "my_database.my_table", if a dot is found, the schema param is disregarded :type table: str :param metastore_conn_id: The hive connection you are interested in. If your default is set you don't need to use this parameter. :type metastore_conn_id: str :param filter_map: partition_key:partition_value map used for partition filtering, e.g. {'key1': 'value1', 'key2': 'value2'}. Only partitions matching all partition_key:partition_value pairs will be considered as candidates of max partition. :type filter_map: map :param field: the field to get the max value from. If there's only one partition field, this will be inferred :type field: str >>> max_partition('airflow.static_babynames_partitioned') '2015-01-01'
max_partition
airflow/macros/hive.py
test
python
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/macros/hive.py#L23-L55
b69c686ad8a0c89b9136bb4b31767257eb7b2597
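Beyond the doctest above, filter_map narrows the candidate partitions; a sketch with an illustrative partition filter:

from airflow.macros.hive import max_partition

# Latest 'ds' value among partitions where country='US' (filter is illustrative).
latest = max_partition(
    'airflow.static_babynames_partitioned',
    field='ds',
    filter_map={'country': 'US'},
)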
