import requests
import lxml.html as lh
from urllib.parse import urljoin
import time
import csv


def get_html(url):
    """
    Download a page and return its HTML body.

    User-Agent value taken from: DevTools -> Network -> Preserve log
    -> **.html -> Headers -> User-Agent
    :param url: page URL to fetch
    :return: response text on a 2xx status, otherwise None
    """
    header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
                            " AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"}
    try:
        # timeout keeps the crawler from hanging forever on a stalled connection
        response = requests.get(url, headers=header, timeout=10)
    except requests.RequestException:
        # treat network errors the same as a bad status: caller already handles None
        return None
    # numeric range check instead of string-prefix test on str(status_code)
    return response.text if 200 <= response.status_code < 300 else None


# def get_detail_url_list(url):
#     html = get_html(url)
#     if not html:
#         print("Requests Not 200 Start")
#         return
#     parser = lh.etree.HTML(html)
#     detail_urls = [urljoin(url, i) for i in parser.xpath("//ul[contains(@class,'v_picTxt')]/li/div[@class='pic']/a/@href")]
#     next_page_tag = parser.xpath("//div[@class='v_page']/a[last()]")
#     if next_page_tag:
#         next_page_tag = next_page_tag[0]
#     next_page_url = next_page_tag.attrib.get("href") if "下一页" in next_page_tag.text else None
#     return next_page_url, detail_urls

def get_detail_url_list(url):
    """
    Extract the movie detail-page links from one list page.

    :param url: list-page URL
    :return: list of absolute detail-page URLs; empty list when the
             page could not be fetched (never None, so callers can
             iterate the result without a guard)
    """
    html = get_html(url)
    if not html:
        print("Requests Not 200 Start")
        # returning [] instead of None keeps `for url in urls` in spier() safe
        return []
    parser = lh.etree.HTML(html)
    hrefs = parser.xpath("//ul[contains(@class,'v_picTxt')]/li/div[@class='pic']/a/@href")
    # hrefs are site-relative; resolve them against the list-page URL
    return [urljoin(url, href) for href in hrefs]


def parse_detail(url):
    """
    Parse one movie detail page with XPath.

    :param url: detail-page URL
    :return: [title, starrings, score, introduction, types] on success,
             None when the page could not be fetched
    """
    html = get_html(url)
    if not html:
        print("Response not 2 start")
        return None
    tree = lh.etree.HTML(html)
    title = "".join(tree.xpath("//div[@class='tit']/h1/text()"))
    starrings = ",".join(tree.xpath("//ul[contains(@class,'txtList')]/li[@class='liActor li_3']//a/text()"))
    # guard against pages without a score element instead of raising IndexError
    score_nodes = tree.xpath("//div[@class='tit']/p/em/text()")
    score = score_nodes[0].replace("分", "") if score_nodes else ""
    introduction = "".join(tree.xpath("//ul[contains(@class, 'txtList')]/li[1]/p/span/text()"))
    types = "".join(tree.xpath("//ul[contains(@class,'txtList')]/li[@class='li_3']/a[2]/text()"))
    return [title, starrings, score, introduction, types]


def spier():
    """
    Crawl each configured list page and append every movie's data to a CSV file.

    Fixes over the original while-loop version:
    - the per-page counter started printing at 2 for the first movie (off-by-one);
    - a page yielding fewer than 35 links never advanced the page counter,
      so the `while True` loop refetched the same page forever;
    - the page URL variable was shadowed by the detail-URL loop variable;
    - a None result from parse_detail (failed fetch) is now skipped instead
      of being written to the CSV.

    :return: None
    """
    base_url = "https://dianying.2345.com/list/aiqing-------{}.html"
    first_page, last_page = 100, 100  # original loop only covered page 100
    for page in range(first_page, last_page + 1):
        detail_urls = get_detail_url_list(base_url.format(page))
        for index, detail_url in enumerate(detail_urls, start=1):
            data = parse_detail(detail_url)
            if data is None:
                continue  # fetch failed; skip this movie
            write_csv("D:\\Python\\爬虫\\XpathTest\\movie.csv", data)
            print(detail_url)
            print("第", page, "页", "第", index, "部")


def write_csv(file_path, datas):
    """
    Append one row of values to a CSV file.

    :param file_path: path of the CSV file to append to
    :param datas: iterable of values written as a single row
    :return: None
    """
    # "with" guarantees the handle is closed even if writerow raises,
    # unlike the manual open/close pair it replaces.
    with open(file=file_path, mode="a", encoding="utf8", newline="") as f:
        writer = csv.writer(f)
        print(datas)
        writer.writerow(datas)


if __name__ == "__main__":
    # Time the whole crawl and report the elapsed seconds on exit.
    started = time.time()
    spier()
    elapsed = time.time() - started
    print("sum_time{}".format(elapsed))

