from urllib import request, parse
from lxml import etree
import ssl

# Disable HTTPS certificate verification globally (unverified SSL context)
ssl._create_default_https_context = ssl._create_unverified_context


def dyldsearch(self, line, page, searchword):
    """Search for movies on a dianyingleida mirror and scrape the result page.

    Args:
        self: unused; kept for call-site compatibility.
        line: mirror selector as a string, '1' or '2'.
        page: page number as a string (interpolated into the query URL).
        searchword: raw search keyword; URL-quoted before the request.

    Returns:
        On success: {"total": <total page count as str>, "list": [<movie dict>, ...]}
        where each movie dict has keys name, director, classify, intro, poster,
        link, definition, actor.
        On any failure (unknown mirror, network, or parse error):
        {"code": -1, "message": "error", "data": None}.
    """
    # Mobile UA — the mobile page layout is what the XPath selectors below expect.
    # user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36'
    user_agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
    headers = {'User-Agent': user_agent}
    mirrors = {
        '1': "http://dianyingleida.com",
        '2': "http://www.nmkan.com",
    }
    dyld_url = mirrors.get(line)
    if dyld_url is None:
        # Unknown mirror selector: the original code would crash on
        # request.Request('') outside the try; fail soft like other errors.
        return {"code": -1, "message": "error", "data": None}
    url = dyld_url + "/search.php?page=" + page + "&searchword=" + parse.quote(searchword)
    req = request.Request(url, headers=headers)
    try:
        # Context manager guarantees the HTTP response is closed.
        with request.urlopen(req) as response:
            # HTML source of the result page as a string
            html = response.read().decode('utf-8')
        # Parse into an lxml HTML document
        selector = etree.HTML(html)
        # One <li> per search hit
        search_list = selector.xpath('//ul[@id="searchList"]/li')
        node_list = []
        for node in search_list:
            name = node.xpath('div[@class="detail"]/h4[@class="title"]/a/text()')[0]
            director = node.xpath('div[@class="detail"]/p[1]/text()')[0]
            # classify is deliberately kept as serialized HTML, not plain text.
            classify = node.xpath('div[@class="detail"]/p[3]')[0]
            classify = etree.tostring(classify, method='html').decode().strip()
            try:
                intro = node.xpath('div[@class="detail"]/p[4]/text()')[0]
            except IndexError:
                # Some entries have no intro paragraph.
                intro = ""
            poster = node.xpath('div[@class="thumb"]/a/@data-original')[0]
            if poster.startswith('/upload'):
                # Site-relative poster path: prefix the mirror host.
                poster = dyld_url + poster
            link = dyld_url + node.xpath('div[@class="thumb"]/a/@href')[0]
            definition = node.xpath('div[@class="thumb"]/a/span[@class="pic-text text-right"]/text()')[0]
            actor_node_list = []
            for actor_node in node.xpath('div[@class="detail"]/p[2]/a'):
                try:
                    actor_node_list.append(actor_node.xpath('text()')[0])
                except IndexError:
                    # Anchor with no text node: skip it (original left a dead
                    # `actor = ""` here that was never appended).
                    pass
            node_list.append({
                "name": name,
                "director": director,
                "classify": classify,
                "intro": intro,
                "poster": poster,
                "link": link,
                "definition": definition,
                "actor": actor_node_list,
            })
        # Pager label is "current/total"; keep the total part.
        total = selector.xpath('//ul[@class="myui-page text-center clearfix"]/li[last()-2]/a/text()')[0].split('/')[1]

        return {
            "total": total,
            "list": node_list,
        }
    except Exception as err:
        # Broad catch is intentional: any network or parse failure maps to the
        # uniform error payload. Print the *instance* (the original printed the
        # Exception class, losing the actual error).
        print(url, err)
        return {"code": -1, "message": "error", "data": None}
