#coding=utf-8
import datetime
import json
import logging
import logging.handlers  # required: `import logging` alone does not provide logging.handlers
import re

import scrapy

from Core import MongoDB
import Gadget27
import items
import Parser
import pipelines


# Shanghai Stock Exchange (SSE) spider
# Block-trade disclosure, mixed symbols, with pagination

class SSESpider(scrapy.Spider):
    """Crawl the Shanghai Stock Exchange block-trade disclosure list.

    The site exposes the data through a JSONP query endpoint; the page count
    is only known after the first date-range query, so `start_requests`
    issues page 1 and `parse` chains requests for the remaining pages.
    """

    name = "SSE_BlockTrade_Mixed"

    # Daily-rotating file log, 40 days retained.
    # NOTE(review): this configures the ROOT logger as a class-definition
    # side effect; the directory d:/log/BlockTrade must already exist.
    logger = logging.getLogger()
    fh = logging.handlers.TimedRotatingFileHandler(
        "d:/log/BlockTrade/" + name + ".log",
        when='D', interval=1, backupCount=40)
    fh.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    # Only records emitted via the 'root' logger pass through this handler.
    fh.addFilter(logging.Filter('root'))
    logger.addHandler(fh)

    # Landing page only; actual data comes from the query endpoint (GotoPage).
    start_urls = ["http://www.sse.com.cn/disclosure/diclosure/block/deal/"]

    # ---Crawl date range (inclusive); updated 2017-09-15---
    datetime1 = "2017-07-06"  # start date
    datetime2 = "2017-09-15"  # end date

    custom_settings = {
        'ITEM_PIPELINES': {
            'pipelines.MongoDBPipeline': 0  # must match the pipelines module imported at the top
        },
        "COOKIES_ENABLED": False,
        "DOWNLOAD_DELAY": 1,
        "PROXIES": [
            {'ip_port': '119.4.172.166:80', 'user_pass': ''},
            {'ip_port': '121.206.132.35:8118', 'user_pass': ''}
        ]
    }

    def start_requests(self):
        """Issue the first query (page 1).

        The site only reveals the page count after a date-range query has
        been submitted once, so pagination starts from this response.
        """
        print("Start To Crawl")
        print("Existing settings: %s" % self.settings.attributes.keys())

        query = {
            "StartDate": self.datetime1,
            "EndDate": self.datetime2,
            "Page": 1,
        }
        url, headers = self.GotoPage(query)
        yield scrapy.Request(url, method="GET", headers=headers)

    # ---Download-page entry point---
    def parse(self, response):
        """Parse one result page, yield its items, then request the next page."""
        print("Parse")
        self.logger.debug("Parse")

        # ---Pagination info comes back inside the JSONP payload---
        payload = self.ParseJasonResponse(response)
        maxPage = payload["pageHelp"]["pageCount"]
        page = payload["pageHelp"]["pageNo"]

        query = {
            "StartDate": self.datetime1,
            "EndDate": self.datetime2,
            "MaxPage": maxPage,
            "Page": page,
        }
        for item in self.ParseContent(response, query):
            yield item

        # ---Chain to the next page until the last one has been fetched---
        page += 1
        if page <= maxPage:
            print("Goto Page: " + str(page))
            query["Page"] = page
            url, headers = self.GotoPage(query)
            # parse() re-derives the page number from the response itself,
            # so no extra state needs to be carried through the callback
            # (the original lambda bound a parameter it never used).
            yield scrapy.Request(url, method="GET", headers=headers,
                                 callback=self.parse)

    def ParseJasonResponse(self, response):
        """Strip the JSONP wrapper ``callback(...)`` from the response body.

        Returns the decoded JSON object, or None when decoding fails.
        """
        text = response.body
        # Scrapy returns bytes under Python 3; the original code assumed str.
        if isinstance(text, bytes):
            text = text.decode("utf-8", "replace")
        try:
            index = text.find("(")
            # ---Drop everything up to the first '(' and the trailing ')'---
            return json.loads(text[index + 1:len(text) - 1])
        except ValueError:
            # json decoding errors are ValueError subclasses; was a bare
            # `except:` that silently swallowed every exception type.
            print("Parse Response Failed : " + text)
            if self.logger is not None:
                self.logger.error("Parse Response Failed : " + text)
        return None

    def GotoPage(self, input):
        """Build the query URL and headers for one result page.

        input: dict with "StartDate", "EndDate" and "Page" keys.
        Returns a (url, headers) tuple ready for scrapy.Request.
        """
        base = "http://query.sse.com.cn/commonQuery.do"
        params = {
            "jsonCallBack": "jsonpCallback3181",
            "isPagination": "true",
            "sqlId": "COMMON_SSE_XXPL_JYXXPL_DZJYXX_L_1",
            "stockId": "",
            "startDate": input["StartDate"],
            "endDate": input["EndDate"],
            "pageHelp.pageSize": "25",
            "pageHelp.pageNo": str(input["Page"]),
            "pageHelp.beginPage": str(input["Page"]),
            "pageHelp.endPage": "5",
            "pageHelp.cacheSize": "1",
            "_": "1499309625002"
        }
        url = Gadget27.GenerateURL(base, params)

        # NOTE(review): Accept-Language / User-Agent contain odd extra
        # spaces; kept byte-identical since the endpoint accepts them.
        headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh - CN, zh;q = 0.8",
            "Connection": "keep-alive",
            "Host": "query.sse.com.cn",
            "Referer": "http://www.sse.com.cn/disclosure/diclosure/block/deal/",
            "User-Agent": "Mozilla / 5.0(Windows NT 6.1; WOW64) AppleWebKit/537.36(KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        }
        return url, headers

    # ---Parse content; items are yielded to the pipeline (saved to database)---
    def ParseContent(self, response, input):
        """Parse one page's JSONP payload into items for the pipeline.

        input: dict with at least "Page" and "MaxPage" keys (progress log).
        Returns the list produced by Parser.ParseSSEBlockTrade.
        """
        s = "Parse Content: Page:" + str(input["Page"]) + " Max:" + str(input["MaxPage"])
        print(s)
        self.logger.debug(s)

        payload = self.ParseJasonResponse(response)
        return Parser.ParseSSEBlockTrade(input, payload, self.logger)




