#coding=utf-8
import datetime
import json

import pytz
import scrapy
from Core import MongoDB
import pipelines
import items

# Eastmoney (eastmoney.com) crawler
# Per-stock research reports, crawled page by page for each stock

def ToDateTimeString(date):
    """Format a datetime as 'YYYY-MM-DD HH:MM:SS.000' (milliseconds fixed at zero)."""
    formatted = date.strftime('%Y-%m-%d %H:%M:%S')
    return "{0}.000".format(formatted)

def ToUTCDateTime(datetime1):
    """Convert a naive Beijing-local (UTC+8) datetime to an aware UTC datetime.

    Subtracts the fixed 8-hour offset and tags the result with pytz.utc.
    NOTE(review): assumes the input always represents UTC+8 wall-clock time.
    """
    shifted = datetime1 - datetime.timedelta(hours=8)
    return shifted.replace(tzinfo=pytz.utc)

def ToLocalDateTime(datetime1):
    """Convert a naive UTC datetime to an aware Asia/Shanghai datetime.

    Bug fix: attaching a pytz zone with datetime.replace(tzinfo=...) uses the
    zone's base LMT offset (+08:06 for Asia/Shanghai), not the correct +08:00.
    pytz requires localize() to attach the proper offset.
    """
    shifted = datetime1 + datetime.timedelta(hours=8)
    return pytz.timezone("Asia/Shanghai").localize(shifted)

def ProcessEastMoneyAuthor(author):
    """Split a comma-separated author string into a list, removing all spaces."""
    return [part.replace(" ", "") for part in author.split(",")]

# Parses the catalogue's Chinese date format, e.g. "2016年03月10日 00:00"
def ProcessEastMoneyDateTime(strDateTime):
    """Parse 'YYYY年MM月DD日 HH:MM' into a naive datetime via fixed slicing."""
    fields = (strDateTime[0:4], strDateTime[5:7], strDateTime[8:10],
              strDateTime[12:14], strDateTime[15:17])
    year, month, day, hour, minute = (int(f) for f in fields)
    return datetime.datetime(year, month, day, hour, minute)

# Parses the ISO-like timestamp format, e.g. "2016-09-13T07:24:21"
def ProcessEastMoneyDateTime2(strDateTime):
    """Parse 'YYYY-MM-DDTHH:MM:SS' into a naive datetime via fixed slicing."""
    spans = ((0, 4), (5, 7), (8, 10), (11, 13), (14, 16), (17, 19))
    parts = [int(strDateTime[a:b]) for a, b in spans]
    return datetime.datetime(*parts)

class eastMoneySpider(scrapy.Spider):
    """Eastmoney (data.eastmoney.com) spider for per-stock research reports.

    For every stock instrument loaded from MongoDB it pages through the
    catalogue JSON endpoint, then fetches and yields each report article
    that is not already stored in the database.
    """

    name = "emSingleStockReportByStock"

    # Entry page; can also be changed in start_requests.
    allowed_domains = ["eastmoney.com"]
    start_urls = [
        "http://data.eastmoney.com/report/"
    ]

    # Catalogue endpoint template, assembled as:
    #   contentRequest1 + <page number> + contentRequest2 + <stock code> + contentRequest3
    contentRequest1 = "http://datainterface.eastmoney.com//EM_DataCenter/js.aspx?type=SR&sty=GGSR&js=var%20NoKlTKoH={%22data%22:[(x)],%22pages%22:%22(pc)%22,%22update%22:%22(ud)%22,%22count%22:%22(count)%22}&ps=25&p="
    contentRequest2 = "&code="
    contentRequest3 = "&rt=49127360"

    custom_settings = {
        'ITEM_PIPELINES': {
            'pipelines.MongoDBPipeline': 0  # must match the pipelines module imported at the top of the file
        },
        "COOKIES_ENABLED": False,
        "DOWNLOAD_DELAY": 1,
        "PROXIES": [
            {'ip_port': '119.4.172.166:80', 'user_pass': ''},
            {'ip_port': '121.206.132.35:8118', 'user_pass': ''}
        ]
    }

    def start_requests(self):
        """Open the MongoDB connection, load the stock list, and yield the entry request."""
        # NOTE(review): hard-coded database address — consider moving to settings.
        self.database = MongoDB.MongoDB("192.168.1.90", "27017")
        self.instruments = self.database.findAll("Instruments", "Stock")

        print("Start To Crawl")
        print("Existing settings: %s" % self.settings.attributes.keys())
        for url in self.start_urls:
            # make_requests_from_url() is deprecated in Scrapy; this Request is
            # its documented equivalent (callback defaults to parse).
            yield scrapy.Request(url, dont_filter=True)

    # ---Download-page entry point---
    def parse(self, response):
        """Queue the first catalogue page for every stock in the configured range."""
        print("Parse Extry")

        # ---Loop over stocks, clamping the inclusive end index to the list size---
        startIndex = 0
        endIndex = 3000
        maxIndex = len(self.instruments)
        if endIndex + 1 > maxIndex:
            endIndex = maxIndex - 1
        for i in range(startIndex, endIndex + 1):
            instrument = self.instruments[i]
            rawCode = instrument["RawCode"]
            request = self.contentRequest1 + "1" + self.contentRequest2 + rawCode + self.contentRequest3

            inputParam2 = {"RawCode": rawCode, "Page": 1}
            # Bind the per-stock parameters as a default argument so each
            # lambda keeps its own copy (avoids the late-binding pitfall).
            yield scrapy.Request(request, callback=lambda response, param=inputParam2: self.parseContent(response, param))

    def parseJson(self, text):
        """Strip the 'var XXX=' JavaScript prefix and parse the remaining JSON.

        The 'encoding' argument to json.loads was removed in Python 3.9 and was
        a no-op for str input before that, so it is not passed here.
        """
        payload = text[text.find("=") + 1:]
        return json.loads(payload)

    # ---Parse one catalogue page---
    def parseContent(self, response, param):
        """Queue article requests for unseen reports, then the next catalogue page."""
        print("Parse Content:" + param["RawCode"] + ":" + str(param["Page"]))

        try:
            j = self.parseJson(response.text)
            pages = int(j["pages"])
        except Exception:  # 'except Exception, e' is Python-2-only syntax; 'e' was unused
            print(param["RawCode"] + " Can't Parse Json")
            return

        i = 0
        for entry in j["data"]:
            i = i + 1
            # ---Build the article URL from the entry date and info code---
            infoCode = entry["infoCode"]
            strDate = entry["datetime"][0:4] + entry["datetime"][5:7] + entry["datetime"][8:10]
            url = "http://data.eastmoney.com/report/" + strDate + "/" + infoCode + ".html"
            inputParam = {
                "Page": param["Page"],
                "i": i,
                "URL": url,
                "Issuer": entry["insName"],
                "Symbol": entry["secuFullCode"],
                "Rating": entry["rate"],
                "Change": entry["change"],
                "Title": entry["title"],
                "Author": entry["author"],
                "DateTime": entry["datetime"],
            }

            # Skip articles already stored in the database.
            reports = self.database.findWithFilter("Text", "ResearchReport", {"Link": url})
            if len(reports) == 0:
                print("Request:" + entry["title"] + ":" + url)
                yield scrapy.Request(url, callback=lambda response, param=inputParam: self.parseArticle(response, param))

        # ---More catalogue pages remain: queue the next one---
        if param["Page"] < pages:
            inputParam1 = {"RawCode": param["RawCode"], "Page": param["Page"] + 1}
            request = self.contentRequest1 + str(inputParam1["Page"]) + self.contentRequest2 + param["RawCode"] + self.contentRequest3
            yield scrapy.Request(request, callback=lambda response, param=inputParam1: self.parseContent(response, param))

    # ---Parse one article page---
    def parseArticle(self, response, param):
        """Extract one report article and yield a ResearchReportItem."""
        print("Parse Article:" + param["Symbol"] + ":P:" + str(param["Page"]) + ":#:" + str(param["i"]) + ":" + param["Title"] + ":" + response.url)

        reportTitle = response.xpath('//div[@class="report-title"]/h1/text()').extract()
        reportInfo = response.xpath('//div[@class="report-infos"]/span/text()').extract()
        rawReportContent = response.xpath('//div[@class="newsContent"]/*').extract()
        reportContent = ""
        for para in rawReportContent:
            para = para.replace("<p>", "").replace("</p>", "")
            if para == "":
                continue
            reportContent = reportContent + para + "\r\n"

        item = items.ResearchReportItem()
        item["Type"] = "ResearchReport"

        if param is not None:
            item['Title'] = reportTitle[0]
            if param["Title"] != item['Title']:
                print("ParseError:Title Not Match")

            datetime1 = ProcessEastMoneyDateTime2(param["DateTime"])
            item['DateTime'] = ToDateTimeString(datetime1)

            item["Symbol"] = param["Symbol"]
            item['Link'] = param["URL"]
            item['Issuer'] = param['Issuer']
            item['Author'] = ProcessEastMoneyAuthor(param['Author'])
            # Bug fix: Rating/Change were previously read outside this guard
            # and would have raised TypeError whenever param is None.
            item["Rating"] = param["Rating"]
            item["Change"] = param["Change"]
        else:
            # Fallback path: no catalogue metadata, parse everything off the page.
            item['Title'] = reportTitle[0]
            item['Link'] = response.url

            datetime1 = ProcessEastMoneyDateTime(reportInfo[0])
            item['DateTime'] = ToDateTimeString(datetime1)

            if len(reportInfo) == 3:
                item['Issuer'] = reportInfo[1]
                item['Author'] = ProcessEastMoneyAuthor(reportInfo[2])
            elif len(reportInfo) == 2:
                item['Author'] = ProcessEastMoneyAuthor(reportInfo[1])

        item['StdDateTime'] = ToUTCDateTime(datetime1)
        item['Content'] = reportContent
        yield item

