"""
使用多线程爬取天气信息
"""
# 引入模块
import requests
from bs4 import BeautifulSoup
from utils import sqlhelper
from datetime import datetime, timedelta
import threading
from queue import Queue


class SpiderWeather:
    """Multi-threaded spider for city weather forecasts.

    Pipeline: city URLs are loaded from the ``CityURL`` table, downloaded by
    a pool of fetcher threads, parsed into per-day records, and finally
    inserted into / updated in the ``CityWeather`` table.
    Stages communicate through three queues:
    ``url_queue`` -> ``html_queue`` -> ``data_queue``.
    """

    def __init__(self):
        # (Area, URL) rows loaded from the database.
        self.urls = []
        # Last database error message, if loading the URLs failed.
        self.error = ""

        # Pretend to be a desktop browser so the site serves the normal page.
        self.headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; " + \
                                      "Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " + \
                                      "Chrome/69.0.3497.100 Safari/537.36"}

        # ====== pipeline queues ========
        self.url_queue = Queue()
        self.html_queue = Queue()
        self.data_queue = Queue()
        # ====== run summary / result dict =========
        self.res = {'status': False, 'date': datetime.now().strftime('%Y-%m-%d'), 'spider': 0, 'update': 0, 'insert': 0}
        # ==== load the city URL list immediately ======
        self.get_urls_from_db()

    def get_urls_from_db(self):
        """Load (Area, URL) pairs for every city from the CityURL table."""
        sql = "Select Area, URL from CityURL where City=Area And URL is not Null;"
        # Execute the query through the project helper.
        response = sqlhelper.get_db_data(sql)
        # On failure keep the error message for inspection instead of raising.
        if response['status']:
            self.urls = list(response['data'])
        else:
            self.error = response['error']

    def get_url_queue(self):
        """Feed every (city, url) pair into the URL queue."""
        for area, url in self.urls:
            self.url_queue.put({'city': area, 'url': url})

    def get_content_queue(self):
        """Worker: download each queued URL and push the HTML onward.

        Runs forever as a daemon thread; one iteration per queued URL.
        """
        while True:
            one_city = self.url_queue.get()
            try:
                # timeout prevents a dead site from hanging this worker
                # (and therefore url_queue.join()) forever.
                response = requests.get(one_city['url'], headers=self.headers, timeout=30)
                one_city['content'] = response.content.decode("utf-8")
                # Hand the page text to the parser stage.
                self.html_queue.put(one_city)
            except Exception as err:  # keep the worker thread alive on bad URLs
                print("download failed for %s: %s" % (one_city['url'], err))
            finally:
                # Always mark the task done -- otherwise a failed download
                # would deadlock url_queue.join() in run().
                self.url_queue.task_done()

    def get_data_queue(self):
        """Worker: parse each HTML page into one record per forecast day."""
        while True:
            one_city = self.html_queue.get()
            try:
                soup = BeautifulSoup(one_city['content'], 'lxml')
                # The "day7" panel holds the multi-day forecast blocks.
                six_days = soup.find('div', id="day7").find_all('div', class_='weather pull-left')
                today = datetime.now()
                for index, day in enumerate(six_days):
                    one_day_dict = {}
                    # City name.
                    one_day_dict['area'] = one_city['city']
                    # Forecast dates start tomorrow (index 0 -> today + 1 day).
                    weather_date = today + timedelta(days=(index + 1))
                    one_day_dict['date'] = weather_date.strftime('%Y-%m-%d')
                    # Weather description.
                    one_day_dict['weather'] = str(day.find('div', class_='desc').text).strip()
                    # Wind direction.
                    one_day_dict['windd'] = str(day.find('div', class_='windd').text).strip()
                    # Wind strength.
                    one_day_dict['winds'] = str(day.find('div', class_='winds').text).strip()
                    # Temperatures: [0] is the high, [1] is the low;
                    # strip the degree sign so only the number remains.
                    temp_content = day.find_all('div', class_='tmp')
                    one_day_dict['hight'] = str(temp_content[0].text).strip().replace("℃", "")
                    one_day_dict['low'] = str(temp_content[1].text).strip().replace("℃", "")
                    # Hand the record to the database writer stage.
                    self.data_queue.put(one_day_dict)
                    # Count every scraped day.
                    self.res['spider'] += 1
            except Exception as err:  # bad/changed page layout must not kill the thread
                print("parse failed for %s: %s" % (one_city.get('city'), err))
            finally:
                # Guarantee task_done so html_queue.join() cannot hang.
                self.html_queue.task_done()

    def save_data_db(self):
        """Worker: insert or update each forecast record in CityWeather."""
        while True:
            weather = self.data_queue.get()
            try:
                # NOTE(review): the SQL below is built by string interpolation.
                # If sqlhelper supports placeholders, switch to parameterized
                # queries -- a city name containing a quote would break these
                # statements, and this pattern is injectable.
                # Does a row for this city/date already exist?
                sql_get = "Select Area from CityWeather Where Area='%s' and Date ='%s'" % (weather['area'], weather['date'])
                response_get = sqlhelper.get_db_data(sql_get)
                if len(response_get['data']) == 0:
                    # No row yet -> insert.
                    sql_insert = "Insert Into CityWeather(Area,Date,Weather,Windd,Winds,Hight,Low) Value ('%s','%s','%s','%s','%s'," \
                                 "'%s', '%s')" % (
                                 weather['area'], weather['date'], weather['weather'], weather['windd'], weather['winds'],
                                 weather['hight'], weather['low'])
                    response = sqlhelper.update_db(sql_insert)
                    if response['status']:
                        self.res['insert'] += 1
                    else:
                        print("%s的%s的天气写入失败！！！！" % (weather['area'], weather['date']))
                else:
                    # Row exists -> update it in place.
                    sql_update = "Update CityWeather Set Weather='%s',Windd='%s',Winds='%s',Hight='%s',Low='%s' " \
                                 "where Area='%s' and Date ='%s'" % (weather['weather'], weather['windd'], weather['winds'],
                                                                     weather['hight'], weather['low'], weather['area'],
                                                                     weather['date'])
                    response = sqlhelper.update_db(sql_update)
                    if response['status']:
                        self.res['update'] += 1
                    else:
                        print("%s的%s天气更新失败！！！！" % (weather['area'], weather['date']))
            finally:
                # Guarantee task_done even if the DB layer raises, so
                # data_queue.join() cannot hang.
                self.data_queue.task_done()

    def run(self):
        """Run the download/parse/save pipeline and block until it drains."""
        # Fill the URL queue up-front, BEFORE starting any worker or calling
        # join(). The original code did this in a thread, which raced with
        # the queue joins below: join() on a still-empty queue returns
        # immediately and the program could finish before any work started.
        self.get_url_queue()

        thread_list = []
        # ---------- 5 downloader threads (network bound) ----------
        for _ in range(5):
            thread_list.append(threading.Thread(target=self.get_content_queue))
        # ---------- 1 parser thread ----------
        thread_list.append(threading.Thread(target=self.get_data_queue))
        # ---------- 1 database writer thread ----------
        thread_list.append(threading.Thread(target=self.save_data_db))

        # Start every worker.
        for t in thread_list:
            # Daemon threads: the infinite `while True` worker loops must not
            # keep the process alive after the queues drain.
            # (t.daemon replaces the deprecated t.setDaemon(True).)
            t.daemon = True
            t.start()

        # Wait for every pipeline stage to be fully processed, in order.
        for q in [self.url_queue, self.html_queue, self.data_queue]:
            q.join()

        # Mark the run as successful (was declared but never set before).
        self.res['status'] = True
        print("所有数据获取完成！")
        print(self.res)


if __name__ == '__main__':
    import time

    # Time the whole scraping run.
    started_at = time.time()
    spider = SpiderWeather()
    spider.run()
    finished_at = time.time()
    # Report elapsed wall-clock seconds.
    print("程序执行的时间：%.4f" % (finished_at - started_at))




