#encoding:utf-8                                                                      
import requests                                                                      
from bs4 import BeautifulSoup 
import pandas as pd
import numpy as np   
from Write_data import*

class Weather_spider():
    """Scraper for historical weather data from lishi.tianqi.com.

    Builds the monthly archive URLs for one of two locations, downloads and
    parses each month page, collects one row per day, and hands the result
    to the project's Write_data module for saving as CSV.

    NOTE: constructing an instance immediately runs the whole scrape
    (see __init__) — callers rely on this side effect.
    """

    def __init__(self, fileName="小金天气15-19.csv", place=1):
        """Initialise state and immediately run the full scrape.

        fileName -- output CSV file name handed to Write_data
        place    -- 0 for Jiuzhaigou (2012-2017); any other value for
                    Xiaojin / Mt. Siguniang (2015-2019, the default)
        """
        self.urls = []
        self.saveFileName = fileName
        self.place = place
        self.weatherData = []  # list of per-day rows; becomes a DataFrame in dealData
        # Entry point: the pipeline runs as a construction side effect.
        self.main()

    def main(self):
        """Run the pipeline: build URLs, fetch, parse, reshape, then save."""
        self.urlGenerate()
        self.getHtml(self.urls)
        self.dealData(self.weatherData)
        print(self.weatherData)
        # Persistence is delegated to the project's writer module.
        Write_data(self.saveFileName, self.weatherData)

    def urlGenerate(self):
        """Build the list of monthly archive URLs for the selected place."""
        self.urls = []
        if self.place == 0:
            # Jiuzhaigou: one page per month, 2012-2017 inclusive.
            for year in range(2012, 2018):
                for month in range(1, 13):
                    self.urls.append(
                        "http://lishi.tianqi.com/jiuzhaigou/%d%02d.html" % (year, month))
        else:
            # Xiaojin (Mt. Siguniang): one page per month, 2015-2019 inclusive.
            for year in range(2015, 2020):
                for month in range(1, 13):
                    self.urls.append(
                        "http://lishi.tianqi.com/xiaojin/%d%02d.html" % (year, month))

    def fakeHeader(self, url):
        """Fetch *url* with a browser-like User-Agent header.

        The site added anti-scraping measures, so a browser User-Agent is
        mandatory.  A timeout is set so a dead connection cannot hang the
        whole scrape indefinitely (fix: the original call had no timeout).
        """
        kv = {'User-Agent': 'Mozilla/5.0'}  # standard browser identity field
        response = requests.get(url, headers=kv, timeout=30)
        return response

    def getHtml(self, urls):
        """Download each URL and feed the parsed soup to analyzeHtml."""
        for url in urls:
            response = self.fakeHeader(url)
            soup = BeautifulSoup(response.text, 'html.parser')
            self.analyzeHtml(soup)

    def analyzeHtml(self, soup):
        """Extract one row per day from a month page into self.weatherData.

        The site was redesigned; daily rows now live under
        div.tian_three > ul > li.  Each <li> yields one list shaped like
        [date, high, low, weather, wind].
        """
        weather_list = soup.select('div[class="tian_three"]')
        for weather in weather_list:
            for ul in weather.select('ul'):
                for li in ul.select('li'):
                    weather_day = li.get_text().split("\n")
                    # Drop the empty strings produced by the newline split.
                    weather_day = [part for part in weather_day if part]
                    self.weatherData.append(weather_day)

    def dealData(self, weatherData):
        """Trim dates to YYYY-MM-DD and convert the rows to a DataFrame.

        The *weatherData* parameter is kept for backward compatibility, but
        (as in the original) the method operates on self.weatherData.
        """
        # Keep only the date part; drop the trailing weekday ("YYYY-MM-DD 周X").
        for row in self.weatherData:
            row[0] = row[0].split(" ")[0]
        # Convert the list of rows into a labelled DataFrame.
        names = ['date', '最高温度', '最低温度', 'weather', '风力']
        self.weatherData = pd.DataFrame(columns=names, data=self.weatherData)
        

# Weather_spider()   # debug: uncomment to run the scraper directly
                              