from urllib import request, parse
from lxml import etree
import ssl
import time
import requests

# Disable HTTPS certificate verification globally so urlopen() does not
# reject iqiyi pages on hosts with an incomplete CA bundle.
# NOTE(review): this weakens TLS for the whole process — acceptable for a
# scraping script, but do not reuse in security-sensitive code.
ssl._create_default_https_context = ssl._create_unverified_context


def aiqiyi(self, name):
    """Search iqiyi for a show titled *name* and scrape its detail card.

    Fetches the iqiyi search page, and if the first result's title matches
    *name* exactly, scrapes the poster, director, cast, synopsis and the
    episode list from the show's detail page.

    :param self: unused; kept for call-compatibility with existing callers.
    :param name: show title to search for (will be URL-quoted).
    :return: on an exact title match, a dict with keys
             ``source``/``pic``/``director``/``actor``/``desc``/``list``;
             ``None`` when the first result's title differs from *name*;
             an error dict ``{"code": -1, ...}`` on any scraping failure.
    """
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36'
    headers = {'User-Agent': user_agent}
    url = "https://so.iqiyi.com/so/q_" + parse.quote(name)
    req = request.Request(url, headers=headers)
    try:
        response = request.urlopen(req)
        # Raw HTML of the search-results page.
        html = response.read().decode('utf-8')
        selector = etree.HTML(html)
        # Title of the first search result; IndexError here (no results)
        # falls through to the error handler below.
        matched_title = selector.xpath('//ul[@class="mod_result_list"]/li[1]//h3[@class="result_title"]//font/text()')[0]
        if name != matched_title:
            # First result is not an exact match — nothing to return.
            return None

        # Source label shown to the caller (user-facing string, do not translate).
        source = "爱奇艺视频"

        # Poster image; the page uses protocol-relative URLs.
        pic = 'https:' + \
              selector.xpath('(//ul[@class="mod_result_list"]/li)[1]/a//img/@src')[0]

        # Director name(s) — first half-width info column.
        director = selector.xpath(
            "//ul[@class='mod_result_list']/li[1]//div[@class='info_item'][1]/div[@class='result_info_cont result_info_cont-half'][1]/a[@class='result_info_link']/text()")
        # Main cast — second half-width info column.
        actor = selector.xpath(
            "//ul[@class='mod_result_list']/li[1]//div[@class='info_item'][1]/div[@class='result_info_cont result_info_cont-half'][2]/a[@class='result_info_link']/text()")

        # Synopsis paragraph.
        desc = selector.xpath(
            "//ul[@class='mod_result_list']/li[@class='list_item'][1]//div[@class='info_item'][2]//span[@class='result_info_txt']/text()")[
            0]

        result = {
            "source": source,
            "pic": pic,
            "director": director,
            "actor": actor,
            "desc": desc,
        }

        # Follow the first result's link to the show's detail page
        # to collect the per-episode list.
        href = selector.xpath('//ul[@class="mod_result_list"]/li[1]/a/@href')
        href_req = request.Request(href[0], headers=headers)
        response_href = request.urlopen(href_req)
        href_html = response_href.read().decode('utf-8')
        href_selector = etree.HTML(href_html)
        content_list = href_selector.xpath('//div[@class="piclist-wrapper"]//li/div[@class="site-piclist_info"]')
        episodes = []  # renamed from `list` — do not shadow the builtin
        for item in content_list:
            title = item.xpath('p[@class="site-piclist_info_title"]/a/text()')[0]
            link = item.xpath('p[@class="site-piclist_info_title"]/a/@href')[0]
            episodes.append({"title": title, "link": link})
        result['list'] = episodes
        return result
    except Exception as exc:
        # Bug fix: the original printed the `Exception` class itself,
        # discarding the actual error — bind and print the instance.
        print(url, exc)
        data = {"code": -1, "message": "error", "data": None}
        return data
