# -*- coding: utf-8 -*-
import datetime
import json

import scrapy
from scrapy.utils.project import get_project_settings

from hk_bigdata.items import RegionItem, PopItem, FlowItem, FlowAllItem
from hk_bigdata.spiders.data_base import DataBase


class HkPopulationSpider(scrapy.Spider):
    """Scrape per-region population heat data from Tencent's heat-map API.

    For each region stored in ``hk_bigdata.shanghai_region`` the spider
    issues a chain of three POST requests:

    1. population counts   -> ``PopItem``      (``parse``)
    2. in/out flow         -> ``FlowItem``     (``flow_parse``)
    3. cumulative in/out   -> ``FlowAllItem``  (``flow_all_parse``)
    """
    name = 'hk_population'
    allowed_domains = ['heat.qq.com']
    start_urls = ['https://heat.qq.com/access/bigdata/population?sub_domain=police']
    flow_start_url = 'https://heat.qq.com/access/bigdata/flow?sub_domain=police'
    flow_all_start_url = 'https://heat.qq.com/access/bigdata/flowsum?sub_domain=police'
    # Legacy attribute kept for backward compatibility; the per-region
    # request payload is now carried in each Request's meta (see below).
    region_data = {}
    db = DataBase()
    # NOTE(review): Scrapy already applies DEFAULT_REQUEST_HEADERS via its
    # downloader middleware; this attribute is fetched but never passed to
    # any Request here — presumably used elsewhere. Kept as-is.
    headers = get_project_settings().get("DEFAULT_REQUEST_HEADERS")

    def start_requests(self):
        """Overridden Scrapy entry point: emit the first request per region.

        Loads all regions from the database and yields one POST request to
        the population endpoint for each, with the JSON payload for the last
        5 minutes of data.
        """
        # Fetch every region (city, region_id, name) from the database.
        sql_tb = """
                             select * from hk_bigdata.shanghai_region ORDER BY city
                     """
        regions = self.db.select_tb(sql_tb)
        # Capture "now" once so begin/end are computed from the same instant
        # (calling now() twice could straddle a minute boundary).
        now = datetime.datetime.now()
        # Current time, truncated to the minute.
        date_time_end = now.strftime("%Y-%m-%d %H:%M:00")
        # Five minutes before the current time.
        date_time_begin = (now - datetime.timedelta(minutes=5)).strftime("%Y-%m-%d %H:%M:00")
        for region in regions:
            # BUG FIX: this payload was previously stored on ``self`` and
            # overwritten on every loop iteration. Because Scrapy handles
            # requests asynchronously, the follow-up flow/flowsum requests
            # built inside the parse callbacks re-serialized the SHARED
            # attribute and therefore sent the last region's region_id for
            # every region. The payload now travels with each request in
            # ``meta`` so each callback chain uses its own region's data.
            region_data = {
                "region_id": region[1],
                "date_time_begin": date_time_begin,
                "date_time_end": date_time_end,
                "interval": 1,
                "sub_domain": "police"
            }
            meta = {
                "city": region[0],
                "id": region[1],
                "name": region[2],
                "region_data": region_data
            }
            yield scrapy.Request(self.start_urls[0],
                                 method='POST',
                                 callback=self.parse,
                                 body=json.dumps(region_data),
                                 meta=meta)

    def parse(self, response):
        """Handle the population response.

        Yields one ``PopItem`` per datapoint, then chains a request to the
        flow endpoint for the same region.
        """
        res_dict = json.loads(response.text)
        meta = response.meta
        if res_dict["message"] == "success":
            for data in res_dict["data"]:
                item = PopItem()
                item["city"] = meta["city"]
                item["id"] = meta["id"]
                item["name"] = meta["name"]
                item["tm"] = data["tm"]
                item["max"] = data["max"]
                item["min"] = data["min"]
                item["val"] = data["value"]
                yield item
            # Chain to the in/out flow endpoint, reusing this region's payload.
            yield scrapy.Request(self.flow_start_url,
                                 method='POST',
                                 callback=self.flow_parse,
                                 body=json.dumps(meta["region_data"]),
                                 meta=meta)

    def flow_parse(self, response):
        """Handle the in/out flow response.

        Yields one ``FlowItem`` per datapoint, then chains a request to the
        cumulative-flow endpoint for the same region.
        """
        res_dict = json.loads(response.text)
        meta = response.meta
        if res_dict["message"] == "success":
            for data in res_dict["data"]:
                item = FlowItem()
                item["city"] = meta["city"]
                item["id"] = meta["id"]
                item["name"] = meta["name"]
                item["tm"] = data["tm"]
                item["flow_in"] = data["in"]
                item["flow_out"] = data["out"]
                yield item
            # Chain to the cumulative flow endpoint with the same payload.
            yield scrapy.Request(self.flow_all_start_url,
                                 method='POST',
                                 callback=self.flow_all_parse,
                                 body=json.dumps(meta["region_data"]),
                                 meta=meta)

    def flow_all_parse(self, response):
        """Handle the cumulative (total) in/out flow response.

        Yields one ``FlowAllItem`` per datapoint; end of the request chain.
        """
        res_dict = json.loads(response.text)
        meta = response.meta
        if res_dict["message"] == "success":
            for data in res_dict["data"]:
                item = FlowAllItem()
                item["city"] = meta["city"]
                item["id"] = meta["id"]
                item["name"] = meta["name"]
                item["tm"] = data["tm"]
                item["flow_all_in"] = data["in"]
                item["flow_all_out"] = data["out"]
                yield item
