#coding=utf-8
import datetime
import json
import logging
import logging.handlers  # required for TimedRotatingFileHandler; not pulled in by `import logging`
import re

import scrapy

import Gadget27
import items
import Parser
import pipelines
from Core import MongoDB


#深圳交易所爬虫
#大宗交易披露，混合Symbol，翻页

# Shenzhen Stock Exchange spider.
# Block-trade disclosures, mixed symbols, with pagination.
class SZexchangeSpider(scrapy.Spider):

    name = "SZExchange_BlockTrade_Mixed"

    # --- Logging setup (runs once, at class-definition time) ---
    # Uses the root logger so the logging.Filter('root') below matches.
    logger = logging.getLogger()
    # Daily-rotated log file, keeping 40 days of backups.
    # NOTE(review): needs `import logging.handlers` at the top of the file;
    # a plain `import logging` does not expose the `handlers` submodule.
    fh = logging.handlers.TimedRotatingFileHandler(
        "D:/Log/BlockTrade/" + name + ".log", when='D', interval=1, backupCount=40)
    # Handler output format.
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    # Only pass records originating from the root logger.
    # (Renamed from `filter` to avoid shadowing the builtin.)
    log_filter = logging.Filter('root')
    fh.addFilter(log_filter)
    logger.addHandler(fh)
    # NOTE(review): the root logger level defaults to WARNING, so the
    # self.logger.debug(...) calls below are dropped unless the level is
    # lowered elsewhere -- confirm this is intended.

    # ---Last updated 2017-07-05---
    start_urls = ["http://www.szse.cn/main/disclosure/news/xyjy/"]
    # Inclusive date window for the block-trade query.
    datetime1 = "2017-07-06"  # start date
    datetime2 = "2017-09-16"  # end date

    # Spider-local Scrapy settings.
    custom_settings = {
        'ITEM_PIPELINES': {
            'pipelines.MongoDBPipeline': 0  # must match the `pipelines` import at file top
        },
        "COOKIES_ENABLED": False,
        "DOWNLOAD_DELAY": 1,
        "PROXIES": [
            {'ip_port': '119.4.172.166:80', 'user_pass': ''},
            {'ip_port': '121.206.132.35:8118', 'user_pass': ''}
        ]
    }

    def start_requests(self):
        """Issue the initial search request for the configured date window."""
        print("Start To Crawl")
        print("Existing settings: %s" % self.settings.attributes.keys())

        # ---First query: a plain search (no pagination parameters yet)---
        query = {"StartDate": self.datetime1, "EndDate": self.datetime2}
        url, formdata = self.GotoPage(query, nagigate=False)
        print("Make a Begining Search")
        yield scrapy.FormRequest(url, formdata=formdata)

    # ---Download-page entry point: read paging info, schedule page requests---
    def parse(self, response):
        print("Parse Entry")
        self.logger.debug("Parse Entry")
        param = self.ParseNavigation(response)

        maxPage = param["MaxPage"]
        # Only page 1 is crawled for now; widen [startPage, endPage]
        # (up to maxPage) to fetch more pages.
        startPage = 1
        endPage = 1

        # ---Loop over the requested result pages---
        for page_no in range(startPage, endPage + 1):
            page_param = {
                "Page": page_no,
                "MaxPage": param["MaxPage"],
                "StartDate": self.datetime1,
                "EndDate": self.datetime2,
                "RecordCount": param["RecordCount"],
            }
            url, formdata = self.GotoPage(page_param, nagigate=True)
            print("Make Request " + str(page_param["Page"]) + " Max " + str(page_param["MaxPage"]))
            # The lambda default argument binds page_param per iteration
            # (avoids the late-binding closure pitfall).
            yield scrapy.FormRequest(
                url, formdata=formdata,
                callback=lambda response, param=page_param: self.ParseContent(response, param))

        #---Pagination (disabled)---
        #if page <= maxPage:
        #    yield scrapy.FormRequest(url, formdata=formdata, callback=self.parse)

    def ParseNavigation(self, response):
        """Extract paging info from the page's navigation controls.

        Scans //input/@onclick for a call of the form
        gotoReportPageNo('1265_xyjy', 'tab1', <page>, <maxPage>, <recordCount>)
        and returns {"Page", "MaxPage", "RecordCount"}; all values are None
        when no such control is present (instead of raising NameError).
        """
        print("Parse Navigation" )
        page = maxPage = recordCount = None
        for node in response.xpath('//input/@onclick'):
            text = node.extract()
            print(text)
            #gotoReportPageNo('1265_xyjy', 'tab1', 2, 23, 455)
            #gotoReportPageNoByTextBox('1265_xyjy', 'tab1', '1265_xyjy_tab1_naviboxid', 23, 455)
            match = re.match(r"(gotoReportPageNo\()(.*?)(\))", text)
            if match is not None:
                # Group 2 is the argument list between the parentheses.
                args = match.group(2).split(",")
                page = int(args[2])
                maxPage = int(args[3])
                recordCount = int(args[4])
                break
        if page is None:
            self.logger.warning("ParseNavigation: no paging control found")
        return {"Page": page, "MaxPage": maxPage, "RecordCount": recordCount}


    def ParseNavigation2(self, response):
        """Alternative paging parser reading the 'current page X of Y' text cells."""
        print("Parse Navigation" )

        page = None
        maxPage = None
        for node in response.xpath('//td/text()'):
            text = node.extract()
            # e.g. "当前第 2 页 ... 共 23 页" (current page N of M).
            match1 = re.match(u"(当前第)(.*?)(页)", text)
            match2 = re.match(u"(.*?)(共)(.*?)(页)", text)
            if match1 is not None:
                page = match1.group(2)
                # Guard: the "共...页" part may be missing from the same cell;
                # the original dereferenced match2 unconditionally.
                if match2 is not None:
                    maxPage = match2.group(3)
                break
        if page is not None:
            page = int(page)
        if maxPage is not None:
            maxPage = int(maxPage)

        return {"Page": page, "MaxPage": maxPage}


    def GotoPage(self, param, nagigate = True):
        """Build the (url, formdata) pair for the SZSE report controller.

        param carries StartDate/EndDate and, when nagigate is True
        (sic: "navigate" -- name kept for backward compatibility),
        also Page/MaxPage/RecordCount for the pagination action.
        """
        url = "http://www.szse.cn/szseWeb/FrontController.szse"
        formData = {
            "ACTIONID": "7",
            "AJAX": "AJAX-TRUE",
            "CATALOGID": "1265_xyjy",
            "TABKEY": "tab1",
            "txtKsrq": param["StartDate"],  # start date
            "txtZzrq": param["EndDate"],    # end date, e.g. 2017-09-01
        }

        if nagigate:
            # Jump to a specific result page.
            formData["REPORT_ACTION"] = "navigate"
            formData["tab1PAGENO"] = str(param["Page"])
            formData["tab1PAGECOUNT"] = str(param["MaxPage"])
            formData["tab1RECORDCOUNT"] = str(param["RecordCount"])
        else:
            # Initial search over the date window.
            formData["REPORT_ACTION"] = "search"

        return url, formData


    #---Parse content; yield items to the pipeline (saved to MongoDB)---
    def ParseContent(self, response, param):
        msg = "Parse Content: Page: " + str(param["Page"]) + " Max:" + str(param["MaxPage"])
        print(msg)
        self.logger.debug(msg)

        #---Delegate row extraction to the shared parser---
        for item in Parser.ParseSZExchangeBlockTrade(param, response, self.logger):
            yield item




