#coding=utf-8
import scrapy
from Core import MongoDB

import Gadget27
import items
import pipelines

#Eastmoney (东方财富网) crawler
#Industry research reports, crawled by industry category

class eastMoneySpider(scrapy.Spider):
    """Spider for industry research reports on eastmoney.com.

    Two entry modes, selected by ``readByMixExtry``:
      * ``True``  -- crawl the mixed daily listing
        (http://data.eastmoney.com/report/hyyb.html); a fixed page range,
        no follow-up paging.
      * ``False`` -- fetch the industry directory first, then walk every
        listing page of each industry category.

    Articles already stored in MongoDB (``Text.IndustryReport``, keyed by
    the report link) are skipped before the article page is requested.
    """

    name = "emIndustryReportByIndustry"

    # Seed page; the real JSON API requests are built from the *Request*
    # template strings below.
    allowed_domains = ["eastmoney.com"]
    start_urls = [
        "http://quote.eastmoney.com/center/BKList.html"
        #http://data.eastmoney.com/report/hyyb.html
    ]

    # True: use the mixed daily entry; False: crawl industry by industry.
    readByMixExtry = True

    #--- directory request: list of industry categories ---
    industryRequest = "http://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKHY&sty=FPGBKI&st=c&sr=-1&p=1&ps=5000&cb=&js=var%20BKCache=[(x)]&token=7bc05d0d4c3c22ef9fca8c2a912d779c&v=0.5252923853621492"

    #--- per-industry entry (listing request = part1 + industry code + part2 + page + part3) ---
    #http://data.eastmoney.com/report/456yb.html
    contentRequest1 = "http://datainterface.eastmoney.com//EM_DataCenter/js.aspx?type=SR&sty=HYSR&sc="#Industry code
    contentRequest2 = "&js=var%20yuowehah={%22data%22:[(x)],%22pages%22:%22(pc)%22,%22update%22:%22(ud)%22,%22count%22:%22(count)%22}&ps=25&p="#page
    contentRequest3 = "&mkt=0&stat=0&rt=49133293"

    #--- mixed entry (listing request = part1 + page + part2) ---
    #http://data.eastmoney.com/report/hyyb.html
    mixRequest1 = "http://datainterface.eastmoney.com//EM_DataCenter/js.aspx?type=SR&sty=HYSR&mkt=0&stat=0&cmd=2&code=&sc=&ps=50&p=" #page
    mixRequest2 = "&js=var%20oWPEzKPb={%22data%22:[(x)],%22pages%22:%22(pc)%22,%22update%22:%22(ud)%22,%22count%22:%22(count)%22}&rt=49142486"

    custom_settings = {
        'ITEM_PIPELINES' : {
        'pipelines.MongoDBPipeline': 0 # must match the `pipelines` module imported at the top of the file
        },
        "COOKIES_ENABLED" : False,
        "DOWNLOAD_DELAY" : 1,
        "PROXIES" : [
          {'ip_port': '119.4.172.166:80', 'user_pass': ''},
          {'ip_port': '121.206.132.35:8118', 'user_pass': ''}
        ]
    }

    def start_requests(self):
        """Open the MongoDB connection, then issue the seed requests.

        NOTE(review): assumes MongoDB at 192.168.1.90:27017 is reachable;
        there is no connection error handling here -- confirm deployment.
        """
        self.database = MongoDB.MongoDB("192.168.1.90", "27017")
        self.instruments = self.database.findAll("Instruments","Stock")

        print("Start To Crawl")
        print("Existing settings: %s" % self.settings.attributes.keys())
        for url in self.start_urls:
            # make_requests_from_url() is deprecated (removed in modern
            # Scrapy); build the Request directly. dont_filter=True matches
            # the old helper's behavior for start URLs.
            yield scrapy.Request(url, dont_filter=True)


    #--- downloaded-page entry point ---
    def parse(self, response):
        """Dispatch to the mixed listing or to the industry directory."""
        if self.readByMixExtry:
            # Mixed entry (http://data.eastmoney.com/report/hyyb.html),
            # updated daily; only this fixed page range is crawled.
            startPage = 1
            endPage = 1
            #--- loop over listing pages ---
            for page in range(startPage, endPage + 1):
                inputParam = {"Page": page}
                mixRequest = self.mixRequest1 + str(page) + self.mixRequest2
                print("Parse Content " + str(page))
                # param=inputParam binds per-iteration state into the lambda
                # (avoids the late-binding closure pitfall)
                yield scrapy.Request(mixRequest, callback=lambda response, param=inputParam: self.parseContent(response, param))
        else:
            # Per-industry entry ("http://quote.eastmoney.com/center/BKList.html"):
            # fetch the industry directory first.
            yield scrapy.Request(self.industryRequest, callback=self.byIndustryEntry)


    def byIndustryEntry(self, response):
        """Parse the industry directory and request page 1 of each industry."""
        print("Parse Extry")

        j = Gadget27.ParseJson(response.text)

        startIndex = 0
        # Crawl at most 100 industries (indices 0-99), clamped to the
        # number of entries the directory actually returned.
        endIndex = min(99, len(j) - 1)
        #--- loop over industries ---
        for i in range(startIndex, endIndex + 1):
            infos = j[i].split(",")
            industryCode = infos[1][-3:]  # last 3 chars of the category id
            industry = infos[2]
            # start requesting from page 1
            request = self.contentRequest1 + industryCode + self.contentRequest2 + "1" + self.contentRequest3

            inputParam2 = {
                "Industry": industry,
                "IndustryCode": industryCode,
                "Page": 1,
            }
            yield scrapy.Request(request, callback=lambda response, param=inputParam2: self.parseContent(response, param))


    #--- parse one listing page (used by BOTH modes) ---
    def parseContent(self, response, param):
        """Parse a report listing page, request unseen articles, and --
        in per-industry mode only -- follow to the next listing page.

        ``param`` carries at least ``Page``; in per-industry mode also
        ``Industry`` and ``IndustryCode``.
        """
        if self.readByMixExtry:
            print("Parse Content:" + ":" + str(param["Page"]))
        else:
            print("Parse Content:" + param["Industry"] + ":" + str(param["Page"]))

        try:
            j = Gadget27.ParseJson(response.text)
            pages = int(j["pages"])
        except Exception:
            # BUGFIX: in mixed mode param has no "Industry" key; using
            # .get() keeps the error handler itself from raising KeyError.
            print(param.get("Industry", "Mix") + " Can't Parse Json" )
            return

        i = 0
        for entry in j["data"]:
            i = i + 1
            infos = entry.split(",")
            #--- assemble the full article address and its metadata ---
            inputParam = {}
            inputParam["Page"] = param["Page"]
            inputParam["i"] = i
            inputParam["Change"] = infos[0]
            inputParam["DateTime"] = Gadget27.ToDateTime(infos[1])
            infoCode = infos[2]
            inputParam["Symbol"] = infos[3]
            inputParam["Issuer"] = infos[4]
            inputParam["Rating"] = infos[7]
            inputParam["Rating2"] = infos[8]
            # the feed escapes commas inside titles as "&sbquo;"; undo it
            title = infos[9].strip().replace("&sbquo;", ",")
            inputParam["Title"] = title
            inputParam["Industry"] = infos[10]

            strDate = Gadget27.ToDateString2(inputParam["DateTime"])
            url = "http://data.eastmoney.com/report/" + strDate + "/hy," + infoCode + ".html"
            inputParam["URL"] = url

            # de-duplicate: skip reports already stored in MongoDB (by link)
            reports = self.database.findWithFilter("Text","IndustryReport",{"Link":url})
            if len(reports) > 0:
                continue
            print("Request:"+ inputParam["Title"] + ":" +url)
            yield scrapy.Request(url, callback=lambda response, param=inputParam: self.parseArticle(response, param))

        # mixed mode crawls a fixed page range from parse(); no paging here
        if self.readByMixExtry:
            return

        #--- more pages remain: follow to the next listing page ---
        if param["Page"] < pages:
            inputParam1 = {}
            inputParam1["Industry"] = param["Industry"]
            # BUGFIX: IndustryCode must be forwarded, otherwise the
            # recursive call raises KeyError when paging past page 2.
            inputParam1["IndustryCode"] = param["IndustryCode"]
            inputParam1["Page"] = param["Page"] + 1 #next page
            request = self.contentRequest1 + param["IndustryCode"] + self.contentRequest2 + str(inputParam1["Page"]) + self.contentRequest3
            yield scrapy.Request(request, callback=lambda response, param=inputParam1: self.parseContent(response, param))


    #--- parse one article page ---
    def parseArticle(self, response, param):
        """Extract the report body and metadata into an IndustryReportItem.

        ``param`` is the metadata dict built by parseContent.
        """
        # response.url is the public accessor (the original used the
        # private response._url attribute)
        print("Parse Article:" + param["Symbol"] + ":P:" + str(param["Page"]) + ":#:" + str(param["i"]) + ":" + param["Title"] + ":" + response.url)

        reportTitle = response.xpath('//div[@class="report-title"]/h1/text()').extract()
        reportInfo = response.xpath('//div[@class="report-infos"]/span/text()').extract()
        rawReportContent = response.xpath('//div[@class="newsContent"]/*').extract()
        reportContent = ""
        for para in rawReportContent:
            # strip paragraph tags; one source paragraph per CRLF line
            para = para.replace("<p>","").replace("</p>","")
            if para == "":
                continue
            reportContent = reportContent + para + "\r\n"

        item = items.IndustryReportItem()
        item["Type"] = "IndustryReport"

        item['Title'] = reportTitle[0].strip()
        if param["Title"] !=  item['Title']:
            # the listing title and the article title should agree
            print("Error, ParseError:Title Not Match : " + item['Title'])

        item['DateTime'] = Gadget27.ToDateTimeString(param["DateTime"])
        item['StdDateTime'] = Gadget27.ToUTCDateTime(param["DateTime"])

        # the author sits in the last <span> of the info bar; the bar has
        # 2 or 3 spans depending on the page layout
        if len(reportInfo) == 3:
            item['Author'] = Gadget27.ProcessEastMoneyAuthor(reportInfo[2])
        elif len(reportInfo) == 2:
            item['Author'] = Gadget27.ProcessEastMoneyAuthor(reportInfo[1])

        item["Symbol"] = param["Symbol"]
        item['Link'] = param["URL"]
        item['Issuer'] = param['Issuer']

        item["Rating"] = param["Rating"]
        item["Rating2"] = param["Rating2"]
        item["Industry"] = param["Industry"]
        item["Change"] = param["Change"]

        item['Content'] = reportContent
        yield item


    #--- parse one page of the mixed JSON listing ---
    # NOTE(review): currently unreferenced -- parse() routes mixed mode
    # through parseContent instead. If re-enabled, the inputParam built
    # here lacks the "Page"/"i"/"Industry"/"Rating2" keys that
    # parseArticle reads -- confirm before wiring it back in.
    def parseMixContent(self, response, param):
        """Parse a mixed listing page and request each unseen article."""
        print("Parse Mixed Content")
        try:
            j = Gadget27.ParseJson(response.text)
            pages = int(j["pages"])
        except Exception:
            # BUGFIX: param may have no "Industry" key; .get() keeps the
            # error handler from raising KeyError itself.
            print(param.get("Industry", "Mix") + " Can't Parse Json" )
            return

        for entry in j["data"]:
            #--- assemble the full article address ---
            infoCode = entry["infoCode"]
            # "YYYY-MM-DD..." -> "YYYYMMDD"
            strDate = entry["datetime"][0:4] + entry["datetime"][5:7] +  entry["datetime"][8:10]
            url = "http://data.eastmoney.com/report/" + strDate + "/" + infoCode + ".html"
            inputParam = {
                "URL": url,
                "Issuer": entry["insName"],
                "Symbol": entry["secuFullCode"],
                "Rating": entry["rate"],
                "Change": entry["change"],
                "Title": entry["title"],
                "Author": entry["author"],
                "DateTime": entry["datetime"],
            }

            print("Request:"+entry["title"] + ":" +url)
            yield scrapy.Request(url, callback=lambda response, param=inputParam: self.parseArticle(response, param))