import requests
from lxml import etree
import time
import csv
from multiprocessing import Pool

# HTTP headers shared by every request in this module: a browser-like
# User-Agent string (presumably to avoid being served a bot-blocked page —
# NOTE(review): confirm the site actually requires it).
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}


def get_mainurl(url):  # collect the detail URLs for each month's weather page
    """Fetch the city index page and collect the 12 monthly-weather URLs.

    The page lays the months out as four <ul> rows of three links each
    (li[2]..li[4]; li[1] is a label cell), so a nested loop replaces the
    twelve near-identical xpath extractions of the original.

    Returns:
        list[str]: the collected hrefs (possibly fewer than 12 if a link
        is missing — collection stops at the first gap, matching the
        original behavior), or None when the HTTP request does not
        return status 200.
    """
    res = requests.get(url, headers=headers)
    main_url = []
    if res.status_code == 200:  # only parse a successful response
        selector = etree.HTML(res.text)
        htmlurls = selector.xpath('//div[contains(@id,"content")]/div')  # month containers
        try:
            for htmlurl in htmlurls:
                # Jan..Dec: ul[1..4] x li[2..4] in document order.
                for ul_idx in range(1, 5):
                    for li_idx in range(2, 5):
                        href = htmlurl.xpath(f'ul[{ul_idx}]/li[{li_idx}]/a/@href')[0]
                        main_url.append(href)
                time.sleep(0.001)  # brief 1 ms pause between container divs
        except IndexError:
            # A container without the expected link ends collection early.
            pass
        return main_url  # list of every URL found
    else:
        return None  # request failed; caller must handle None


def link_url(url):
    """Turn the relative hrefs from get_mainurl into absolute month URLs.

    Some scraped hrefs already carry the '/lishi/' path segment and some
    do not; the original heuristic keys on href length (< 30 characters
    means the short, segment-less form) and is preserved here.

    Returns:
        list[str]: absolute URLs; empty when the index request failed.
    """
    final_urls = []
    list_urls = get_mainurl(url)
    print(list_urls)
    # get_mainurl returns None on a non-200 response; the original code
    # crashed with TypeError here. Return an empty list instead.
    if not list_urls:
        return final_urls
    for list_url in list_urls:
        if len(list_url) < 30:  # short form is missing the '/lishi/' segment
            final_urls.append('http://www.tianqihoubao.com/lishi/' + list_url)
        else:
            final_urls.append('http://www.tianqihoubao.com' + list_url)
    return final_urls


def get_infos(detail_url):  # scrape one month page's daily weather rows
    """Scrape the daily weather table at detail_url and append rows to CSV.

    Each table row yields (date, weather, temperature, wind) strings with
    CRLF and spaces stripped; rows are appended to data/tmp.csv.
    """
    main_res = requests.get(detail_url, headers=headers)
    print(main_res)
    main_sele = etree.HTML(main_res.text)
    print(main_sele)
    main_infos = main_sele.xpath('//div[@class="hd"]/div[1]/table/tr')
    try:
        # Open the CSV once per page instead of re-opening it for every
        # row, as the original did inside the loop.
        with open('data/tmp.csv', 'a+', newline='', encoding='UTF-8') as fp:
            writer = csv.writer(fp)
            # main_infos[0] is the table header row — skip it (the
            # original used a boolean flag for this).
            for info in main_infos[1:]:
                date = info.xpath('td[1]/a/text()')[0].replace('\r\n', '').replace(' ', '')
                weather = info.xpath('td[2]/text()')[0].replace('\r\n', '').replace(' ', '')
                temps = info.xpath('td[3]/text()')[0].replace('\r\n', '').replace(' ', '')
                clouds = info.xpath('td[4]/text()')[0].replace('\r\n', '').replace(' ', '')
                print(f"save {date} ...")
                writer.writerow((date, weather, temps, clouds))
    except IndexError:
        # A malformed row aborts the rest of the page — matches the
        # original silent-skip behavior.
        pass


if __name__ == '__main__':  # entry point
    # Index page listing every month's weather URL for the city.
    url = 'http://www.tianqihoubao.com/lishi/nanjing.html'
    details = link_url(url)
    # Truncate the output file and write the header row (column names
    # are user-facing Chinese labels, preserved byte-for-byte).
    with open('data/tmp.csv', 'w+', newline='', encoding='UTF-8') as ff:
        writer = csv.writer(ff)
        writer.writerow(('日期', '天气状况', '气温', '风力风向'))
    # Context-manager form closes and joins the pool on exit; the
    # original leaked the worker processes. Note: map results arrive in
    # completion order, so the CSV may need sorting afterwards.
    with Pool(processes=1) as pool:
        pool.map(get_infos, details)
