#coding=utf-8
import datetime
import json
import logging
import logging.handlers

import pytz
import scrapy

from Core import MongoDB
import Gadget27
import items
import Parser
import pipelines

# Juchao (cninfo.com.cn) crawler
# Per-stock announcement notices, crawled page by page for each stock

class cninfoSpider(scrapy.Spider):
    """Crawl per-stock announcement notices.

    For each instrument loaded from MongoDB, the spider walks the paged
    eastmoney JSON directory interface, then downloads and parses every
    notice detail page that is not already stored in the Text/Notice
    collection.
    """

    # Root logger shared by the spider; records are written to two
    # daily-rotating files (40 rotated files kept).
    logger = logging.getLogger()

    fh = logging.handlers.TimedRotatingFileHandler(
        'd:/log/CNInfoNoticesByStock.log', when='D', interval=1, backupCount=40)
    fh2 = logging.handlers.TimedRotatingFileHandler(
        'd:/log/CNInfoNoticesByStockScrapy.log', when='D', interval=1, backupCount=40)

    # Common log-line format for both handlers.
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    fh2.setFormatter(formatter)

    # Restrict the first file to records emitted via the 'root' logger.
    filter = logging.Filter('root')
    fh.addFilter(filter)

    logger.addHandler(fh)
    logger.addHandler(fh2)

    name = "http://www.cninfo.com.cn/ Spider Notice Crawled By Stock-Specific"

    # Start page; start_requests() below yields these URLs.
    allowed_domains = ["cninfo.com.cn"]
    start_urls = [
        "http://www.cninfo.com.cn/cninfo-new/disclosure/szse"
    ]

    # Directory (listing) request URL is assembled as:
    #   contentRequest1 + RawCode + contentRequest2 + page + contentRequest3
    contentRequest1 = "http://data.eastmoney.com/notices/getdata.ashx?StockCode="
    contentRequest2 = "&CodeType=1&PageIndex="
    contentRequest3 = "&PageSize=50&jsObj=SSsrijYp&SecNodeType=0&FirstNodeType=0&rt=49424429"

    # Inclusive range of instrument indices to crawl in this run.
    startIndex = 0
    endIndex = 0  # e.g. 0-99

    # Notices dated before this cut-off are skipped and pagination stops.
    stopDateTime = datetime.datetime(2016, 12, 1)
    stopDateTime = Gadget27.ToUTCDateTime(stopDateTime)

    custom_settings = {
        'ITEM_PIPELINES': {
            'pipelines.MongoDBPipeline': 0  # must match the pipelines import at the top of the file
        },
        "COOKIES_ENABLED": False,
        "DOWNLOAD_DELAY": 1,
        "PROXIES": [
            {'ip_port': '119.4.172.166:80', 'user_pass': ''},
            {'ip_port': '121.206.132.35:8118', 'user_pass': ''}
        ]
    }

    def start_requests(self):
        """Load the instrument list from MongoDB, then request start_urls."""
        self.database = MongoDB.MongoDB("192.168.1.90", "27017")
        self.instruments = self.database.findAll("Instruments", "Stock")

        print("Start To Crawl")
        print("Existing settings: %s" % self.settings.attributes.keys())
        for url in self.start_urls:
            yield self.make_requests_from_url(url)

    def parse(self, response):
        """Entry callback: queue the first directory page of every stock."""
        print("Parse Entry")

        # Clamp endIndex so the loop never indexes past the instrument list.
        maxIndex = len(self.instruments)
        if self.endIndex + 1 > maxIndex:
            self.endIndex = maxIndex - 1

        for i in range(self.startIndex, self.endIndex + 1):
            instrument = self.instruments[i]

            inputParam2 = {
                "RawCode": instrument["RawCode"],
                "Symbol": instrument["Symbol"],
                "Page": 1,
            }

            request = (self.contentRequest1 + inputParam2["RawCode"] +
                       self.contentRequest2 + str(inputParam2["Page"]) +
                       self.contentRequest3)
            # Bind inputParam2 as a default argument so each callback keeps
            # its own dict (avoids the late-binding closure pitfall).
            yield scrapy.Request(
                request,
                callback=lambda response, param=inputParam2: self.parseContent(response, param))

    def parseContent(self, response, param):
        """Parse one JSON directory page and queue notice detail requests.

        ``param`` carries RawCode, Symbol and the (1-based) Page number of
        the directory page this response belongs to.
        """
        print("Parse Content:" + param["RawCode"] + " Page:" + str(param["Page"]))

        # The interface returns JSON(P); Gadget27.ParseJson extracts it.
        try:
            j = Gadget27.ParseJson(response.text)
            pages = int(j["pages"])
        except Exception:
            print(param["RawCode"] + " Can't Parse Json")
            return

        for i, entry in enumerate(j["data"], start=1):
            inputParam = {
                "Page": param["Page"],
                "i": i,
                "Link": "http://data.eastmoney.com/notices/detail/" + param["RawCode"] + "/" + entry["INFOCODE"] + ",.html",
                "Symbol": param["Symbol"],
                "RawCode": param["RawCode"],
                "Title": entry["NOTICETITLE"],
                "Category": entry["ANN_RELCOLUMNS"][0]["COLUMNNAME"],
                "DateTime": entry["NOTICEDATE"],
            }

            # NOTICEDATE carries a "+hh:mm" offset suffix; drop it and
            # normalize the remaining local time to UTC.
            s = entry["NOTICEDATE"].split("+")[0]
            datetime1 = Gadget27.ToUTCDateTime(
                Gadget27.StringToDateTime(s, "%Y-%m-%dT%H:%M:%S"))
            inputParam["StdDateTime"] = datetime1

            # EUTIME is a second timestamp with minute precision; its exact
            # meaning is unknown, stored alongside for reference.
            s = entry["EUTIME"].split("+")[0]
            inputParam["StdDateTime2"] = Gadget27.ToUTCDateTime(
                Gadget27.StringToDateTime(s, "%Y-%m-%dT%H:%M:%S"))

            # Stop entirely once an entry is older than the cut-off
            # (presumably the listing is newest-first — matches the
            # original early-return behavior).
            if datetime1 < self.stopDateTime:
                return

            # Skip notices already stored in MongoDB (dedupe by Link).
            notices = self.database.findWithFilter("Text", "Notice", {"Link": inputParam["Link"]})
            if len(notices) == 0:
                yield scrapy.Request(
                    inputParam["Link"],
                    callback=lambda response, param=inputParam: self.parseArticle(response, param))
            else:
                print("EXISTED:" + inputParam["Title"] + ":" + inputParam["Link"])

        # More directory pages remain: queue the next one. Use param, not
        # the loop-local dict, so an empty "data" list cannot raise
        # NameError here.
        if param["Page"] < pages:
            inputParam1 = {
                "Symbol": param["Symbol"],
                "RawCode": param["RawCode"],
                "Page": param["Page"] + 1,  # next page
            }
            request = (self.contentRequest1 + inputParam1["RawCode"] +
                       self.contentRequest2 + str(inputParam1["Page"]) +
                       self.contentRequest3)
            yield scrapy.Request(
                request,
                callback=lambda response, param=inputParam1: self.parseContent(response, param))

    def parseArticle(self, response, param):
        """Parse one notice detail page into a NoticeItem and yield it."""
        item = items.NoticeItem()
        item = Parser.ParseNotice(param, response, item, self.logger)
        yield item

