import threading
import lxml
import requests
from lxml import etree
import json

class ConsumerThread(threading.Thread):
    """Worker thread that scrapes a contiguous range of Douban Top-250 pages.

    Pages are addressed by the ``start`` query parameter appended to
    ``startUrl``, stepping by 25 (one page = 25 movies). The scanned
    range [startNum, endNum] is inclusive of both endpoints, matching
    the original scheduling convention used by the main block.
    """

    def __init__(self, startUrl, headers, startNum, endNum, tname):
        threading.Thread.__init__(self)
        self.startUrl = startUrl    # base URL; the page offset is appended as text
        self.headers = headers      # HTTP headers (browser-like User-Agent etc.)
        self.startNum = startNum    # first page offset (inclusive)
        self.endNum = endNum        # last page offset (inclusive)
        self.tname = tname          # human-readable thread tag

    def run(self):
        # Visit offsets startNum, startNum+25, ..., endNum (inclusive).
        for page in range(self.startNum, self.endNum + 25, 25):
            html = request_page(self.startUrl + str(page), self.headers)
            tree = etree.HTML(html)
            for item in tree.xpath("//div[@class='item']"):
                # BUG FIX: the original used an absolute XPath
                # ('//*[@id="content"]/...') for the rating, which matched the
                # FIRST rating node of the whole document on every iteration,
                # so every movie was saved with the first movie's rating.
                # Query relative to the current item instead.
                num = item.xpath(".//span[@class='rating_num']/text()")[0]
                title = item.xpath('./div[2]/div[1]/a[1]/span[1]/text()')[0]
                print(title)
                print(num)
                save_file(title, num)


def request_page(startUrl, headers, timeout=10):
    """Fetch *startUrl* and return the response body decoded as UTF-8 text.

    Args:
        startUrl: full URL to request.
        headers: dict of HTTP headers; Douban rejects requests without a
            browser-like User-Agent.
        timeout: seconds before the request is aborted. The original call
            had no timeout and could hang a worker thread forever.

    Returns:
        The page text on HTTP 200, otherwise ``None`` (the original's
        implicit fall-through behaviour, made explicit here).
    """
    response = requests.get(startUrl, headers=headers, timeout=timeout)
    response.encoding = "utf-8"
    if response.status_code == 200:
        return response.text
    return None
def get_headers():
    """Return the browser-like HTTP headers sent with every Douban request."""
    return {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Host': 'movie.douban.com',
        'Referer': 'https://movie.douban.com/top250?start=0&filter=',
        'Upgrade-Insecure-Requests': '1',
        # Adjacent string literals concatenate into the exact original UA value.
        'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/70.0.3538.110 Safari/537.36'),
    }

def save_file(title, num):
    """Append one movie record to 豆瓣250.json as a single JSON line (JSONL).

    BUG FIX: the original appended indented JSON objects back-to-back with
    no separator, producing a file that no JSON parser could read. Writing
    one compact object per line keeps the output parseable line-by-line.
    (Also fixes the ``diledata`` typo.)

    Args:
        title: movie title text scraped from the page.
        num: rating string scraped from the page.
    """
    record = {
        "title": title,
        "num": num,
    }
    with open("豆瓣250.json", "a", encoding="utf-8") as f:
        f.write(json.dumps(record, ensure_ascii=False) + "\n")


if __name__ == '__main__':
    url = 'https://movie.douban.com/top250?start='
    header = get_headers()
    # Each thread scans an inclusive [start, end] range of page offsets in
    # steps of 25. BUG FIX: the original split (0,100) / (100,200) fetched
    # offset 100 twice (25 duplicated movies) and never fetched offset 225
    # (last 25 movies missed). Splitting as 0-100 and 125-225 covers all
    # ten pages of the Top 250 exactly once.
    thread1 = ConsumerThread(url, header, 0, 100, 'A')
    thread2 = ConsumerThread(url, header, 125, 225, 'B')
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()




