import random
import re
import urllib
import urllib.request

# Pool of desktop browser User-Agent strings to rotate between requests.
UA = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 '
    'Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 '
    'Safari/537.36 Edge/18.18362',
]


def ua() -> tuple:
    """Pick a User-Agent header at random.

    :return: tuple of ("User-Agent", <randomly chosen UA string>)
    """
    chosen = random.choice(UA)
    return "User-Agent", chosen


# Using the builtin urllib.
# urllib is fairly low level; Python 2 wrapped it with urllib2, which was merged
# back into urllib in Python 3, so plain urllib is enough here.
def crawler():
    """Scrape the Youku sci-fi listing page and print title/tag/actors/time per film."""
    url = 'https://list.youku.com/category/show/c_96_g_%E7%A7%91%E5%B9%BB.html'
    opener = urllib.request.build_opener()
    opener.addheaders = [ua()]
    # Install the configured opener globally; an opener can carry auth,
    # proxy and header settings for every subsequent urlopen() call.
    urllib.request.install_opener(opener)

    # Instead of install_opener, a Request object can carry headers as well:
    # from urllib.request import Request
    # req = Request(url, headers={'a':'b'})
    # req.add_header('User-agent', 'xxx')
    # import ssl
    # context = ssl._create_unverified_context()  # skip verification for HTTPS sites with untrusted certs
    # urllib.request.urlopen(req, context)  # the rest of the handling is the same

    # Hoist the patterns out of the loops so each is compiled only once.
    item_re = re.compile("pack-film(.*?)yk-pack")
    title_re = re.compile('title="(?P<title>.*?)"')
    tag_re = re.compile('<span>(?P<tag>.*?)</span>')
    main_re = re.compile('<em>(?P<main>.*?)</em>')
    actor_re = re.compile('title="(?P<actor>.*?)"')
    time_re = re.compile("<li>(?P<time>.*?)</li>")

    with urllib.request.urlopen(url) as resp:
        print(resp.status, resp.geturl(), resp.info())  # status code, final URL, response headers
        data = resp.read().decode('utf-8')
        # finditer only yields Match objects (never falsy), so no extra
        # truthiness check per item is needed.
        for m in item_re.finditer(data):
            item = m.group()
            # (?P<name>regex) named groups are read back via .group(name).
            title = title_re.search(item).group('title')
            tag = tag_re.search(item).group('tag')
            m1 = main_re.search(item)
            actor_title = m1.group('main')
            start_index = m1.end()
            # Every title="..." occurrence after the <em> marker is an actor name.
            m1 = actor_re.search(item, start_index)
            actors = []
            while m1:
                actors.append(m1.group('actor'))
                start_index = m1.end()
                m1 = actor_re.search(item, start_index)
            time_str = time_re.search(item, start_index).group('time')
            print(f'<{title}> {tag} {actor_title}{actors} ({time_str})')


def parse_demo():
    """Demonstrate the common helpers in urllib.parse."""
    import urllib.parse
    params = {
        'id': 123,
        'name': '中古'
    }
    encoded = urllib.parse.urlencode(params)  # percent-encode special characters
    print(encoded)
    decoded = urllib.parse.unquote(encoded)  # decode back
    print(decoded)
    print(urllib.parse.parse_qs(decoded))  # query string -> dict {k1: [v1], k2: [v2]}
    print(urllib.parse.parse_qsl(decoded))  # query string -> list [(k1, v1), (k2, v2)]
    parts = urllib.parse.urlparse('http://baidu.com?a=b&c=d')  # split into scheme/netloc/path/params/query/fragment
    print(parts)
    print(urllib.parse.urlunparse(parts))  # reassemble the urlparse result into a URL
    print(urllib.parse.urlsplit('http://baidu.com'))  # like urlparse, but does not split out params
    print(urllib.parse.urljoin('http://baidu.com', 'abc.html'))  # join a base URL with a relative one
    print(urllib.parse.urljoin('http://baidu.com/xxx/efg.html', 'abc.html'))  # relative path replaces the last segment
    print(urllib.parse.urldefrag('http://baidu.com/xxx/abc.html'))  # split a URL from its #fragment


class Film:
    """Lightweight record of one film entry returned by the listing API."""

    def __init__(self, title, subTitle, summary, link):
        # All fields live on "private" attributes; only title is exposed
        # through a property pair below.
        self._title = title
        self._subTitle = subTitle
        self._summary = summary
        self._link = link

    @property
    def title(self):
        """Display title of the film."""
        return self._title

    @title.setter
    def title(self, title):
        # Allow callers to rename the film after construction.
        self._title = title

    def __repr__(self):
        return f'<Film {self._title}, {self._subTitle}, {self._summary} {self._link}>'

    # str() and repr() render identically.
    __str__ = __repr__


# urllib opener against the JSON paging endpoint.
def crawler1():
    """Fetch pages 1-4 of the Youku sci-fi JSON listing and print each film."""
    url = "https://list.youku.com/category/page?c=96&g=%E7%A7%91%E5%B9%BB&type=show&p="
    opener = urllib.request.build_opener()
    opener.addheaders = [ua()]
    urllib.request.install_opener(opener)
    import json
    for page in range(1, 5):
        print('-'*10, f'page {page}', '-'*10)
        with urllib.request.urlopen(url + str(page)) as f:
            json_str = f.read().decode('utf-8')
            resp_data = json.loads(json_str)
            # The endpoint wraps its payload: {"success": bool, "data": [...]}.
            if resp_data['success']:
                for item in resp_data['data']:
                    # .get() tolerates entries with missing keys (returns None).
                    print(Film(item.get('title'), item.get('subTitle'),
                               item.get('summary'), item.get('videoLink')))


# urllib3 (third-party): implements connection pooling etc.; scrapy builds on it too.
# pip install urllib3
def crawler2():
    """Fetch one page of the JSON listing with urllib3 and dump the response."""
    url = "https://list.youku.com/category/page?c=96&g=%E7%A7%91%E5%B9%BB&type=show&p=1"
    import urllib3
    # PoolManager maintains a pool of reusable connections.
    with urllib3.PoolManager() as http:
        resp = http.request('GET', url, headers={
            'User-agent': ua()[1]
        })
        # BUG FIX: the original called http.urlopen() with no arguments here,
        # which raises TypeError (urlopen requires at least method and url);
        # the call served no purpose and has been removed.
        print(type(resp), resp.status, resp.reason)
        print(resp.headers, resp.data)


# requests (third-party): a higher-level HTTP client built on urllib3.
def crawler3():
    """Fetch pages 1-4 of the JSON listing with requests and print each film."""
    import requests
    url = "https://list.youku.com/category/page?c=96&g=%E7%A7%91%E5%B9%BB&type=show&p="
    _ua = ua()
    headers = {_ua[0]: _ua[1]}
    for i in range(1, 5):
        resp = requests.get(url+str(i), headers=headers)
        print('-' * 10, f'page {i}', resp.status_code, '-' * 10)
        if resp.status_code == 200:
            # Response.json() decodes the body directly — no manual
            # json.loads(resp.text) round-trip needed.
            resp_data = resp.json()
            if resp_data['success']:
                for item in resp_data['data']:
                    print(Film(item.get('title'), item.get('subTitle'),
                               item.get('summary'), item.get('videoLink')))


def crawler4():
    """Reuse one requests session across pages 1-2 and show the sent headers."""
    import requests
    url = "https://list.youku.com/category/page?c=96&g=%E7%A7%91%E5%B9%BB&type=show&p="
    header_name, header_value = ua()
    # A session manages the connection as a conversation: many requests can
    # share it, which reduces connection churn.
    with requests.session() as session:
        for page in range(1, 3):
            with session.get(url + str(page), headers={header_name: header_value}) as resp:
                print(resp.status_code, resp.url)
                # The first request carries no Cookie header; from the second
                # request on, the same session adds the cookie it received.
                print(resp.request.headers)
                print('-' * 66)


# lxml: efficient XML/HTML parsing and building.
# https://lxml.de/installation.html
# https://lxml.de/tutorial.html
# A browser xpath plugin is also handy for trying expressions out.
def lxml_demo():
    """Build a tiny element tree with lxml and query it with xpath."""
    from lxml import etree
    root = etree.Element('root')
    root.set('id', '0')  # attribute on the root node
    root.append(etree.Element('child1', id='1', name='tom'))  # append a child with attributes
    greeting = etree.SubElement(root, 'child2')
    greeting.text = 'hello world'  # element text content
    root.insert(0, etree.Element('child0'))  # insert at the front
    print(root[1], root[1].get('id'))  # child1 and its id attribute
    print(root.items(), root.keys(), root.get('id'), root.attrib['id'])  # root's attribute views
    print(type(greeting), greeting.tag, greeting.getparent())
    print(len(root), list(root), root[0])
    print(etree.tostring(root))
    print(root.xpath('//*'))
    print(root.xpath('//child1/@id'))
    print(root.xpath('//child2/text()'))
    print(root.xpath('//@id'))


# lxml + requests combined.
def xpath_requests_demo():
    """Scrape the Youku sci-fi listing page and pull fields out via xpath."""
    import requests
    from lxml import etree
    url = 'https://list.youku.com/category/show/c_96_g_%E7%A7%91%E5%B9%BB.html'
    with requests.get(url) as resp:
        html = etree.HTML(resp.text)
        cards = html.xpath('//li[@class="yk-col4 mr1"]')
        print(len(cards), cards)
        for card in cards:
            print('-' * 66)
            # Text of every <span> under the bottom info bar (there is just one).
            print(card.xpath('.//ul[@class="p-info pos-bottom"]//span/text()'))
            print(card.xpath('.//ul[@class="info-list"]/li[@class="title"]/a/text()'))  # title
            print(card.xpath('.//ul[@class="info-list"]/li[@class="actor"]/a/text()'))  # actors
            print(card.xpath('.//ul[@class="info-list"]/li[last()]/text()'))  # time


# To execute page JavaScript, install PyExecJS.
# PyCharm: settings -> project:xxx -> python interpreter -> "+" -> search for the package.
if __name__ == '__main__':
    # Uncomment one of the demos below to run it.
    # crawler()
    # parse_demo()
    # crawler1()
    # crawler2()
    # crawler3()
    # crawler4()
    # lxml_demo()
    xpath_requests_demo()
