import scrapy
import items
import pipelines
import os
import random
import json
import datetime

from Core.Config import Config
import Core.Gadget as Gadget

# Module-level configuration and shared resources used by the spider below.
# NOTE: the "\\" path separators make this Windows-only, matching the
# original deployment; kept as-is to avoid changing runtime behavior.
config = Config(os.getcwd() + "\\config.json")

logger = config.Logger("SpiderXueqiu")
database = config.DataBase()
realtime = config.RealTime()


def _load_json_list(filename, key):
    """Load *filename* from the current working directory and return the
    list stored under *key*.

    Uses a ``with`` block so the file handle is closed deterministically —
    the previous ``json.load(open(...))`` form leaked the handle.
    """
    with open(os.getcwd() + "\\" + filename, 'r', encoding='utf-8') as fp:
        return json.load(fp)[key]


# Pool of User-Agent entries and cookies used to vary request headers.
agentList = _load_json_list("agentList.json", "AgentList")
cookieList = _load_json_list("cookieList.json", "CookieList")


class XueqiuSpider(scrapy.Spider):
    """Spider that collects follower counts for stocks from xueqiu.com.

    For every instrument in the ``Instruments/Stock`` collection it requests
    the ``pofriends.json`` endpoint and yields one sentiment document per
    stock, which the configured MongoDB pipeline persists.
    """

    name = "Xueqiu"
    # FIX: requests are built against the bare host (https://xueqiu.com/...),
    # but the previous value "www.xueqiu.com" never suffix-matches that host,
    # so Scrapy's OffsiteMiddleware would drop every request as off-site.
    # "xueqiu.com" matches the bare host and all its subdomains.
    allowed_domains = ["xueqiu.com"]
    start_urls = ['https://www.xueqiu.com/']

    custom_settings = {

        "COOKIES_ENABLED": False,
        "DOWNLOAD_DELAY": 1,  # throttle: one request per second
        "PROXIES": [
            {'ip_port': '119.4.172.166:80', 'user_pass': ''},
            {'ip_port': '121.206.132.35:8118', 'user_pass': ''}
        ],
        # 400/403 responses are delivered to the callback instead of being
        # swallowed by HttpErrorMiddleware (the site returns these when it
        # rejects a cookie/UA), so parsePofriends must tolerate error bodies.
        "HTTPERROR_ALLOWED_CODES": [400, 403],
        'ITEM_PIPELINES': {
            'pipelines.MongoDBPipeline': 0  # must match the `pipelines` module imported at the top
        },
        "MONGO_DATABASE_ADDR": config.cfg["MongoDBAddress"],
        "MONGO_DATABASE_PORT": config.cfg["MongoDBPort"]

    }

    def Header(self, stockcode):
        """Build the request URL and headers for one stock code.

        :param stockcode: exchange-prefixed code, e.g. ``"SH600000"``.
        :returns: ``(url, headers)`` tuple for the pofriends endpoint.
        """
        ua = random.choice(agentList)  # pick a random User-Agent entry
        headers = {}
        headers['X-Requested-With'] = 'XMLHttpRequest'
        # NOTE(review): assumes each agentList entry is a sequence whose first
        # element is the UA string — confirm against agentList.json.
        headers['User-Agent'] = ua[0]
        headers['Host'] = 'xueqiu.com'
        headers['Connection'] = 'keep-alive'
        headers['Accept'] = '*/*'
        headers['Referer'] = 'https://xueqiu.com/S/' + stockcode + '/follows'
        # NOTE(review): always uses the second cookie while the UA is chosen
        # randomly — confirm whether the cookie should rotate as well.
        headers['Cookie'] = cookieList[1]

        url = 'https://xueqiu.com/recommend/pofriends.json?type=1&code=' + stockcode
        return url, headers

    def start_requests(self):
        """Yield one follower-count request per stock in the database."""

        print("Start To Crawl")
        print("Existing settings: %s" % self.settings.attributes.keys())

        query = {}
        # query={"limit":1}
        instruments = database.find("Instruments", "Stock", query=query)

        # ---Loop Stock---
        for instrument in instruments:
            # "600000.SH" -> "SH600000" (exchange prefix + numeric code)
            symbol = instrument["Symbol"]
            symbol_split = symbol.split(".")
            stockcode = symbol_split[1] + symbol_split[0]

            url, headers = self.Header(stockcode)
            param = {"Symbol": symbol}
            # Bind `param` as a lambda default so each request keeps its own
            # symbol (avoids the late-binding closure pitfall in the loop).
            request = scrapy.Request(url, method="GET", headers=headers, callback=lambda response, param=param: self.parsePofriends(response, param))
            yield request

    def parse(self, response):
        """Default callback — unused; all requests route to parsePofriends."""
        pass

    def parsePofriends(self, response, param):
        """Parse a pofriends.json response and yield a sentiment document.

        :param response: Scrapy response (may be a 400/403 error body, since
            those codes are whitelisted in ``custom_settings``).
        :param param: dict carrying the ``Symbol`` this request was made for.
        """
        try:
            html = json.loads(response.text)
        except Exception as e:
            print("Parse Error not Json", e)
            return

        # Error payloads (e.g. from a whitelisted 400/403) carry no
        # 'totalcount'; skip them instead of raising KeyError.
        totalFollows = html.get('totalcount')
        if totalFollows is None:
            print("Parse Error no totalcount field for", param["Symbol"])
            return

        symbol = param["Symbol"]
        document = {}
        document["DataBaseName"] = "Sentiment"
        document["CollectionName"] = "XueqiuFollows"
        document["Symbol"] = symbol
        document["Follows"] = totalFollows
        now = datetime.datetime.now()
        # NOTE(review): naive local time is stored here — confirm whether
        # UTC was intended.
        document["UpdateDateTime"] = now

        # Updates made in the early morning are attributed to the previous day.
        date = Gadget.ToDate(now)  # today's midnight (00:00)
        if now.time() > datetime.time(6, 0, 0):
            # After 06:00 the data counts toward the upcoming midnight.
            date += datetime.timedelta(days=1)

        document["Key"] = symbol + "_" + Gadget.ToDateTimeString(date)
        document["StdDateTime"] = Gadget.ToUTCDateTime(date)
        yield document