import csv
import threading
import time
from queue import Empty, Queue
from urllib.parse import urljoin

import lxml.html as lh
import requests


# Shared state between the producer and consumer threads.
flag = False            # set True by the producer once every detail URL is queued
lock = threading.Lock() # BUG FIX: Lock() must be instantiated — the original bound
                        # the factory itself, so lock.acquire() raised AttributeError
filename = "movie.csv"  # output CSV path (appended to by the consumers)


def write_csv(file_path, datas):
    """Append one row *datas* to the CSV file at *file_path*.

    Opens in append mode with utf8 encoding and newline="" (required by
    the csv module to avoid blank lines on Windows).
    """
    # Context manager guarantees the handle is closed even if writerow
    # raises; the original open/close pair leaked the file on error.
    with open(file=file_path, mode="a", encoding="utf8", newline="") as f:
        writer = csv.writer(f)
        print(datas)  # debug trace of each row written
        writer.writerow(datas)


def get_html(url):
    """Fetch *url* with a desktop browser User-Agent.

    Returns the response body on any 2xx status, otherwise None.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
                      " AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
    }
    response = requests.get(url, headers=headers)
    if str(response.status_code).startswith("2"):
        return response.text
    return None


def get_detail_url_list(url):
    """Scrape the list page at *url* and return absolute detail-page URLs.

    Returns None (implicitly) when the page could not be fetched.
    """
    html = get_html(url)
    if not html:
        print("Requests Not 200 Start")
        return
    root = lh.etree.HTML(html)
    hrefs = root.xpath(
        "//ul[contains(@class,'v_picTxt')]/li/div[@class='pic']/a/@href"
    )
    return [urljoin(url, href) for href in hrefs]


def parse_detail(url):
    """Scrape one movie detail page and return ``[title, score]``.

    Returns None when the page could not be fetched.  The score string has
    the trailing "分" stripped; it is "" when the page has no score element
    (the original indexed [0] unconditionally and raised IndexError there).
    """
    html = get_html(url)
    if not html:
        print("Response not 2 start")
        return
    tree = lh.etree.HTML(html)
    title = "".join(tree.xpath("//div[@class='tit']/h1/text()"))
    # Guard against pages without a score instead of crashing on [0].
    score_nodes = tree.xpath("//div[@class='tit']/p/em/text()")
    score = score_nodes[0].replace("分", "") if score_nodes else ""
    # NOTE: the original also extracted starring / introduction / genre /
    # area but never returned them — that dead work is dropped here.
    return [title, score]


class GetDetailUrlsThread(threading.Thread):
    """Producer: walks list pages 95-100 and feeds detail URLs into the queue.

    When every URL has been queued it raises the module-level ``flag`` so
    the consumer threads know no more work is coming.
    """

    def __init__(self, queue, url, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.queue = queue  # shared work queue consumed by ParseDetailThread
        self.url = url      # kept for interface compatibility with callers

    def run(self):
        for page in range(95, 101):
            # BUG FIX: the original built this paginated URL but then
            # crawled self.url on every iteration, so the built URL was
            # dead and the same page was fetched forever.
            page_url = "https://dianying.2345.com/list/aiqing-------" + str(page) + ".html"
            detail_urls = get_detail_url_list(page_url)
            if not detail_urls:
                continue  # fetch failed; skip this page
            for detail_url in detail_urls:
                # Only enqueue here — parsing is the consumers' job
                # (the original parsed every page in the producer too,
                # doing all the work twice).
                self.queue.put(detail_url)
                print(detail_url)

        global flag
        with lock:
            flag = True


class ParseDetailThread(threading.Thread):
    """Consumer: takes detail URLs off the queue, parses them and appends
    the resulting row to the CSV file under the shared lock."""

    def __init__(self, queue, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.queue = queue  # work queue filled by GetDetailUrlsThread

    def run(self):
        while True:
            # Use a timed get instead of empty()-then-get(): with the
            # original pattern a thread could pass the empty() check and
            # then block forever on get() after the producer finished.
            try:
                url = self.queue.get(timeout=1)
            except Empty:
                if flag:
                    return  # producer done and queue drained
                continue
            data = parse_detail(url)
            if data is None:
                continue  # fetch failed; writerow(None) would raise TypeError
            with lock:
                write_csv(filename, data)


def spier():
    """Run the crawl: one producer thread fills the queue with detail-page
    URLs, five consumer threads parse them and append rows to the CSV.

    The original first crawled pages 95-100 sequentially and only then
    started the threads, doing every fetch and parse twice and handing the
    producer a leftover detail URL; the sequential pass is removed.
    """
    q = Queue(maxsize=100)
    base_url = "https://dianying.2345.com/list/aiqing-------"
    workers = [GetDetailUrlsThread(queue=q, url=base_url)]
    for _ in range(5):
        workers.append(ParseDetailThread(q))
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()


if __name__ == "__main__":
    start_time = time.time()
    spier()
    print("sum_time{}".format(time.time()-start_time))

