import io
import sys
import urllib.request  # 获取网页内容
from bs4 import BeautifulSoup  # 解析网页
import re  # 正则匹配想要的数据
import pandas as pd
from Write_data import*
from random import randint
import datetime
import time


class valley_spider():
    """Scrape daily tourist-flow figures from the official Jiuzhaigou
    (place=0) or Mount Siguniang (place=1) website and save them as CSV.

    The constructor runs the whole pipeline immediately: generate page
    URLs, fetch and parse each page, then write the accumulated
    (date, flow) table via ``Write_data``.
    """

    # 700-2180 jiu (place=0) / 300-1820 si (place=1)
    def __init__(self, dataAmount=2180, fileName="dataset/九寨沟tourismFlow.csv", place=0):
        """Initialize state and immediately run the scraping pipeline.

        :param dataAmount: upper bound of the site's ``start`` paging offset
        :param fileName: CSV path the scraped table is written to
        :param place: 0 = Jiuzhaigou, 1 = Mount Siguniang
        """
        self.urls = []                    # generated page URLs to fetch
        self.saveFileName = fileName      # output CSV path
        # BUGFIX: was `pd.DataFrame` (the class object, not an instance);
        # both accumulators are plain lists refilled per page.
        self.valleyData_date = []
        # BUGFIX: was misspelled `valleyDate_flow`, so the attribute the
        # rest of the code reads was never initialized here.
        self.valleyData_flow = []
        self.place = place

        # accumulated (date, flow) rows across all pages
        self.names = ['date', 'flow']
        self.valleyData = pd.DataFrame(columns=self.names)

        # default data amount / output file for Mount Siguniang
        if place == 1 and dataAmount == 2180:
            dataAmount = 1280
            self.saveFileName = "dataset/四姑娘山tourismFlow.csv"

        self.dataAmount = dataAmount  # paging limit

        # kick off the whole pipeline (kept for backward compatibility:
        # constructing the object has always performed the scrape)
        self.main()

    def main(self):
        """Generate URLs, fetch and parse every page, then persist the data."""
        if self.place == 0:
            self.urlGenerateJiu()
        else:
            self.urlGenerateSi()

        self.getHtml()

        # reverse so rows run oldest -> newest
        self.valleyData = self.valleyData.iloc[::-1]

        # A locked/open target file (e.g. held open in Excel) must not
        # crash the program after a long scrape; report and move on.
        try:
            Write_data(self.saveFileName, self.valleyData)
        except Exception:
            print("您的写文件过程报错啦，考虑一下是不是把文件打开了， 打开的文件我可不允许写数据。")

    def urlGenerateSi(self):
        """Build the Mount Siguniang paging URLs; ``start`` steps by 10 (300-1820)."""
        for page in range(300 // 10, self.dataAmount // 10):
            self.urls.append(
                "https://www.sgns.cn/news/number?start=%d" % (page * 10))

    def urlGenerateJiu(self):
        """Build the Jiuzhaigou paging URLs; ``start`` steps by 20 (700-2180)."""
        for page in range(700 // 20, self.dataAmount // 20):
            self.urls.append(
                "https://www.jiuzhai.com/news/number-of-tourists?start=%d" % (page * 20))

    def getHtml(self):
        """Fetch every generated URL, parse it, and merge its rows."""
        for url in self.urls:
            # BUGFIX: on a failed request the original printed a warning
            # but then used the unbound `res` -> NameError. Skip the page.
            try:
                res = urllib.request.urlopen(url)
            except OSError:  # URLError subclasses OSError
                print("亲爱的，网络断开~~~~~")
                continue

            if res.status != 200:
                # BUGFIX: original concatenated an int status to a str -> TypeError
                print("获取网页不成功，状态码为：" + str(res.status))

            html_real = res.read().decode('utf-8')

            # build the BeautifulSoup document tree for parsing
            soup = BeautifulSoup(html_real, "html.parser")

            if self.place == 0:
                self.analyzeHtmlJiu(soup)
                self.mergeDataJiu(self.valleyData_date, self.valleyData_flow)
            else:
                self.analyzeHtmlSi(soup)
                self.mergeDataSi(self.valleyData_date, self.valleyData_flow)

    def analyzeHtmlJiu(self, soup):
        """Parse one Jiuzhaigou page into ``valleyData_date``/``valleyData_flow``.

        Lists are reset per page; flow values stay strings as extracted,
        except the hand-patched rows below, which are ints.
        """
        self.valleyData_flow = []
        self.valleyData_date = []

        for tr_tag in soup.find_all('tr'):
            for sub_tag in tr_tag:
                tourism = sub_tag.string
                if tourism is None:
                    # composite cell: descend one level for the flow text
                    for sub_sub_tag in sub_tag:
                        str_tourism_flow = str(sub_sub_tag.string)

                        if "九寨沟" in str_tourism_flow:
                            cleaned = re.sub(r"[ \n\t]", "", str_tourism_flow)
                            re_flow = re.findall("[0-9]+", cleaned)
                            self.valleyData_flow.append(re_flow[0])

                        # site data-entry mistake on 2013-10-24: no usable
                        # number published, use a fixed estimate
                        elif "2013-10-24" in str_tourism_flow:
                            self.valleyData_flow.append(18000)

                        # 2014 golden week was published as a single 6-day
                        # total (208595); spread it evenly with jitter over
                        # the missing days (replaces five copy-pasted blocks)
                        elif "黄金周" in str_tourism_flow:
                            self.valleyData_flow.append(
                                int(208595 / 6) + randint(-50, 50))
                            for day in ("2014-10-06", "2014-10-05",
                                        "2014-10-04", "2014-10-03",
                                        "2014-10-02"):
                                self.valleyData_date.append(day)
                                self.valleyData_flow.append(
                                    int(208595 / 6) + randint(-50, 50))

                # date cells are plain strings containing a dash
                elif "-" in str(tourism):
                    cleaned = re.sub(r"[ \n\t]", "", str(tourism))
                    self.valleyData_date.append(cleaned)

    def analyzeHtmlSi(self, soup):
        """Parse one Mount Siguniang page into ``valleyData_date``/``valleyData_flow``."""
        self.valleyData_flow = []
        self.valleyData_date = []
        # tag == 1 marks a "National Day" aggregate row whose date cell
        # must be skipped (its flow is replaced by a flat estimate)
        tag = 0

        for tr_tag in soup.find_all('tr'):
            for sub_tag in tr_tag:
                tourism = sub_tag.string
                if tourism is None:
                    for sub_sub_tag in sub_tag:
                        str_tourism_flow = str(sub_sub_tag.string)

                        if "四姑娘山" in str_tourism_flow:
                            cleaned = re.sub(r"[ \n\t]", "", str_tourism_flow)
                            re_flow = re.findall("[0-9]+", cleaned)
                            self.valleyData_flow.append(re_flow[0])
                            tag = 0

                        elif "国庆" in str_tourism_flow:
                            # holiday aggregate row: flat estimate
                            self.valleyData_flow.append(10000)
                            tag = 1

                # date cells contain the CJK year character
                elif "年" in str(tourism):
                    if tag == 0:
                        cleaned = re.sub(r"[ \n\t]", "", str(tourism))
                        self.valleyData_date.append(cleaned)

    def mergeDataJiu(self, date, flow):
        """Append paired (date, flow) rows to ``self.valleyData``.

        Tolerates the official site publishing fewer flow values than
        dates: incomplete rows are reported and skipped.
        """
        for num, day in enumerate(date):
            # BUGFIX: the original caught the IndexError, printed, but then
            # still built a 1-column row against 2 column names -> ValueError.
            try:
                row = [day, flow[num]]
            except IndexError:
                print("错误数据：" + day)
                continue
            df_tem = pd.DataFrame(columns=self.names, data=[row])
            # per-row concat kept so each row carries index 0, matching the
            # frame structure the downstream writer has always received
            self.valleyData = pd.concat([self.valleyData, df_tem])

    def mergeDataSi(self, date, flow):
        """Append paired (date, flow) rows to ``self.valleyData``."""
        for day, count in zip(date, flow):
            df_tem = pd.DataFrame(columns=self.names, data=[[day, count]])
            self.valleyData = pd.concat([self.valleyData, df_tem])


# valley_spider(place=1,fileName="四姑娘山客流.csv",dataAmount=400)#调试
# valley_spider(place=0,fileName="九寨沟客流.csv",dataAmount=800)#调试
