# -*- coding: utf-8 -*-
import datetime
import json
import re

import numpy as np
import scrapy
# Today's date as 'YYYY-MM-DD' (local time); every callback below emits only
# records stamped with this date.
today = datetime.date.today().isoformat()


class NewdataSpider(scrapy.Spider):
    """Crawl Eastmoney sector quote APIs and the finance-news list.

    Request flow::

        parse          -> sector list  -> LINE_JSZB / LINE_ZJTJ1 / LINE_SUB_STOCK
        LINE_SUB_STOCK -> per stock    -> ADS_JSZB / ADS_ZYZB / ADS_DBFX / ADS_ZJTJ1
        parse          -> news index   -> parseNextPage -> parseContent

    Every callback yields plain-dict items tagged with a ``'type'`` key and
    only emits records dated ``today``.
    """
    name = 'newData'
    allowed_domains = ['47.push2.eastmoney.com','34.push2his.eastmoney.com','push2his.eastmoney.com','push2.eastmoney.com','f10.eastmoney.com']
    start_urls = ['http://47.push2.eastmoney.com/api/qt/clist/get?cb=jQuery112408737642003131711_1593568028065&pn=1&pz=61&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&fid=f3&fs=m:90+t:2+f:!50&fields=f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152,f124,f107,f104,f105,f140,f141,f207,f208,f209,f222&_=1593568028066']

    # Pre-compiled extractors for the JSON arrays embedded in the JSONP
    # callback wrappers the Eastmoney endpoints return.
    _KLINES_RE = re.compile(r'(?<="klines":)\[.*?\]')
    _DIFF_RE = re.compile(r'(?<="diff":)\[.*?\]')
    _LIST_RE = re.compile(r'\[.*?\]')

    @staticmethod
    def _extract_list(body, pattern):
        """Return the first JSON array *pattern* matches in *body*, or None.

        Replaces the original ``eval()`` of downloaded text: never eval
        network data (arbitrary code execution), and ``eval`` also fails on
        JSON ``true``/``false``/``null`` where ``json.loads`` succeeds.
        Returns None for error/empty responses so callers can just skip.
        """
        match = pattern.search(body)
        if match is None:
            return None
        try:
            return json.loads(match.group(0))
        except ValueError:
            return None

    def parse(self, response):
        """Entry point: fan out from the sector list and start the news crawl."""
        line_jszb='http://34.push2his.eastmoney.com/api/qt/stock/kline/get?cb=jQuery33108107647143769106_1593576915923&secid=90.{}&ut=fa5fd1943c7b386f172d6893dbfba10b&fields1=f1%2Cf2%2Cf3%2Cf4%2Cf5&fields2=f51%2Cf52%2Cf53%2Cf54%2Cf55%2Cf56%2Cf57%2Cf58%2Cf59%2Cf60%2Cf61&klt=101&fqt=1&end=20500101&lmt=1200&_=1593576915928'
        line_zjtj1='http://push2his.eastmoney.com/api/qt/stock/fflow/daykline/get?lmt=0&klt=101&secid=90.{}&fields1=f1,f2,f3,f7&fields2=f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61&ut=b2884a393a59ad64002292a3e90d46a5&cb=jQuery18303752088725795446_1593653471213&_=1593653471429'
        line_sub_stock='http://push2.eastmoney.com/api/qt/clist/get?pn=1&pz=10000&po=1&np=1&ut=b2884a393a59ad64002292a3e90d46a5&fltt=2&invt=2&fid=f62&fs=b:{}&stat=1&fields=f12,f14,f2,f3,f62,f184,f66,f69,f72,f75,f78,f81,f84,f87,f204,f205,f124&rt=53119268&cb=jQuery18309961237446586966_1593577744280&_=1593578049674'
        # Kick off the news crawl regardless of how the quote crawl goes.
        yield scrapy.Request('http://finance.eastmoney.com/a/cpljh.html',callback=self.parseNextPage,meta={'page':1},dont_filter=True)
        data = self._extract_list(response.body.decode(), self._DIFF_RE)
        if not data:
            return
        for row in data:
            code = row['f12']  # sector code
            name = row['f14']  # sector name
            yield scrapy.Request(line_jszb.format(code),callback=self.LINE_JSZB,meta={"code":code,"name":name})
            yield scrapy.Request(line_zjtj1.format(code),callback=self.LINE_ZJTJ1,meta={"code":code,"name":name})
            yield scrapy.Request(line_sub_stock.format(code),callback=self.LINE_SUB_STOCK,meta={"code":code,"name":name})

    def LINE_JSZB(self,response):
        """Sector daily k-line; emit today's bar if present."""
        code=response.meta["code"]
        name=response.meta["name"]
        data = self._extract_list(response.body.decode(), self._KLINES_RE)
        # Guard error/empty responses (original crashed with IndexError here).
        if not data or data[-1][:10] != today:
            return
        temp = data[-1].split(',')
        yield {
            'code':code,
            'name':name,
            'date':temp[0],
            'ads_kpjg':temp[1],   # open price
            'ads_spjg':temp[2],   # close price
            'ads_zgj':temp[3],    # day high
            'ads_zdj':temp[4],    # day low
            'ads_cjl':temp[5],    # trade volume (成交量)
            'ads_cje':temp[6],    # trade amount (成交额)
            'ads_zf':temp[7],     # amplitude (振幅)
            'ads_zdf':temp[8],    # percent change (涨跌幅)
            'ads_zde':temp[9],    # absolute change (涨跌额)
            'ads_hsl':temp[10],   # turnover rate (换手率)
            'type':'line_jszb',
            }

    def LINE_ZJTJ1(self, response):
        """Sector daily money-flow k-line; emit today's record if present."""
        code=response.meta["code"]
        name=response.meta["name"]
        data = self._extract_list(response.body.decode(), self._KLINES_RE)
        if not data or data[-1][:10] != today:
            return
        temp = data[-1].split(',')
        yield {
            'code':code,
            'name':name,
            'date':temp[0],
            'ads_zljlrje':temp[1],    # main-force net inflow amount (主力净流入净额)
            'ads_xdjlrje':temp[2],    # small-order net inflow amount (小单净流入净额)
            'ads_zdjlrje':temp[3],    # medium-order net inflow amount (中单净流入净额)
            'ads_ddjlrje':temp[4],    # large-order net inflow amount (大单净流入净额)
            'ads_cddjlrje':temp[5],   # extra-large-order net inflow amount (超大单净流入净额)
            'ads_zljlrjzb':temp[6],   # main-force net inflow share (主力净流入净占比)
            'ads_xdjlrjzb':temp[7],   # small-order net inflow share (小单净流入净占比)
            'ads_zdjlrjzb':temp[8],   # medium-order net inflow share (中单净流入净占比)
            'ads_ddjlrjzb':temp[9],   # large-order net inflow share (大单净流入净占比)
            'ads_cddjlrjzb':temp[10], # extra-large-order net inflow share (超大单净流入净占比)
            'type':'line_zjtj1',
            }

    def LINE_SUB_STOCK(self,response):
        """Fan out to the four per-stock endpoints for every stock in a sector."""
        ads_jszb='http://push2his.eastmoney.com/api/qt/stock/kline/get?fields1=f1,f2,f3,f4,f5&fields2=f51,f52,f53,f54,f55,f56,f57,f58,f59,f60&fqt=0&end=29991010&ut=fa5fd1943c7b386f172d6893dbfba10b&cb=jQuery18306524721086329908_1593578370605&klt=101&secid=0.{}&fqt=1&lmt=1000&_=1593578371246'
        ads_zyzb='http://f10.eastmoney.com/NewFinanceAnalysis/MainTargetAjax?type=0&code=SZ{}'
        ads_dbfx='http://f10.eastmoney.com/NewFinanceAnalysis/DubangAnalysisAjax?code=SZ{}'
        ads_zjtj1='http://push2his.eastmoney.com/api/qt/stock/fflow/daykline/get?lmt=0&klt=101&secid=0.{}&fields1=f1,f2,f3,f7&fields2=f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f62,f63&ut=b2884a393a59ad64002292a3e90d46a5&cb=jQuery18307800788713541649_1593670010232&_=1593670010897'
        # NOTE(review): ads_zyzb/ads_dbfx hard-code the 'SZ' exchange prefix
        # (and the k-line URLs hard-code secid market '0.'), so Shanghai-listed
        # codes may fetch nothing -- confirm whether that is intended.
        code=response.meta["code"]
        name=response.meta["name"]
        data = self._extract_list(response.body.decode(), self._DIFF_RE)
        if not data:
            return
        for row in data:
            s_code = row['f12']  # stock code
            s_name = row['f14']  # stock name
            yield scrapy.Request(ads_jszb.format(s_code),callback=self.ADS_JSZB,meta={"code":code,"name":name,'s_code':s_code,'s_name':s_name})
            yield scrapy.Request(ads_zyzb.format(s_code),callback=self.ADS_ZYZB,meta={"code":code,"name":name,'s_code':s_code,'s_name':s_name})
            yield scrapy.Request(ads_dbfx.format(s_code),callback=self.ADS_DBFX,meta={"code":code,"name":name,'s_code':s_code,'s_name':s_name})
            yield scrapy.Request(ads_zjtj1.format(s_code),callback=self.ADS_ZJTJ1,meta={"code":code,"name":name,'s_code':s_code,'s_name':s_name})

    def ADS_JSZB(self,response):
        """Per-stock daily k-line; emit today's bar if present."""
        code=response.meta["code"]
        name=response.meta["name"]
        s_code=response.meta["s_code"]
        s_name=response.meta["s_name"]
        data = self._extract_list(response.body.decode(), self._KLINES_RE)
        if not data or data[-1][:10] != today:
            return
        temp = data[-1].split(',')
        yield {
            'code':code,
            'name':name,
            's_code':s_code,
            's_name':s_name,
            'date':temp[0],
            'ads_kpjg':temp[1],  # open price
            'ads_spjg':temp[2],  # close price
            'ads_zgj':temp[3],   # day high
            'ads_zdj':temp[4],   # day low
            'ads_cjl':temp[5],   # trade volume (成交量)
            'ads_cje':temp[6],   # trade amount (成交额)
            'ads_zf':temp[7],    # amplitude (振幅)
            'ads_zdf':temp[8],   # percent change (涨跌幅)
            'ads_zde':temp[9],   # absolute change (涨跌额)
            'type':'ads_jszb',
            }

    def ADS_ZYZB(self,response):
        """Per-stock key financial indicators; emit today's record if present."""
        code=response.meta["code"]
        name=response.meta["name"]
        s_code=response.meta["s_code"]
        s_name=response.meta["s_name"]
        data = self._extract_list(response.body.decode(), self._LIST_RE)
        # Empty-list guard (original raised IndexError on data[0]).
        if not data or data[0].get('date') != today:
            return
        item = data[0]
        item['code']=code
        item['name']=name
        item['s_code']=s_code
        item['s_name']=s_name
        item['type']='ads_zyzb'
        yield item

    def ADS_DBFX(self,response):
        """Per-stock DuPont analysis; emit today's record if present."""
        code=response.meta["code"]
        name=response.meta["name"]
        s_code=response.meta["s_code"]
        s_name=response.meta["s_name"]
        data = self._extract_list(response.body.decode(), self._LIST_RE)
        if not data or data[0].get('date') != today:
            return
        item = data[0]
        item['code']=code
        item['name']=name
        item['s_code']=s_code
        item['s_name']=s_name
        item['type']='ads_dbfx'
        yield item

    def ADS_ZJTJ1(self, response):
        """Per-stock daily money-flow k-line; emit today's record if present."""
        code=response.meta["code"]
        name=response.meta["name"]
        s_code=response.meta["s_code"]
        s_name=response.meta["s_name"]
        data = self._extract_list(response.body.decode(), self._KLINES_RE)
        if not data or data[-1][:10] != today:
            return
        temp = data[-1].split(',')
        yield {
            'code':code,
            'name':name,
            's_code':s_code,
            's_name':s_name,
            'date':temp[0],
            'ads_zljlrje':temp[1],    # main-force net inflow amount (主力净流入净额)
            'ads_xdjlrje':temp[2],    # small-order net inflow amount (小单净流入净额)
            'ads_zdjlrje':temp[3],    # medium-order net inflow amount (中单净流入净额)
            'ads_ddjlrje':temp[4],    # large-order net inflow amount (大单净流入净额)
            'ads_cddjlrje':temp[5],   # extra-large-order net inflow amount (超大单净流入净额)
            'ads_zljlrjzb':temp[6],   # main-force net inflow share (主力净流入净占比)
            'ads_xdjlrjzb':temp[7],   # small-order net inflow share (小单净流入净占比)
            'ads_zdjlrjzb':temp[8],   # medium-order net inflow share (中单净流入净占比)
            'ads_ddjlrjzb':temp[9],   # large-order net inflow share (大单净流入净占比)
            'ads_cddjlrjzb':temp[10], # extra-large-order net inflow share (超大单净流入净占比)
            'ads_spj':temp[11],       # close price (收盘价)
            'ads_zdf':temp[12],       # percent change (涨跌幅)
            'type':'ads_zjtj1',
            }

    def parseNextPage(self, response):
        """News index page: queue every article, then follow the pagination."""
        # url template for subsequent news-list pages
        url='http://finance.eastmoney.com/a/cpljh_{}.html'
        page=response.meta["page"]
        self.logger.debug('news list page %s', page)  # was a bare print()
        # Locate the news listing <ul>; skip the page if the markup changed.
        uls = response.xpath('//ul[@id="newsListContent"]')
        if not uls:
            return
        hrefs = uls[0].xpath('./li//a/@href').extract()
        # Each article link appears twice (thumbnail + headline), so step by 2.
        for href in hrefs[::2]:
            yield scrapy.Request(href,callback=self.parseContent,dont_filter=True)
        # Follow pagination while a "next page" link exists.
        texts=response.xpath('//a[@target="_self"]/text()').extract()
        if '下一页' in texts:
            yield scrapy.Request(url.format(page+1),callback=self.parseNextPage,meta={'page':page+1},dont_filter=True)

    @staticmethod
    def _parse_author(div):
        """Best-effort author extraction; the markup has several variants."""
        try:
            authors=div.xpath('.//div[@class="author"]/text()').extract()[0][3:].split(' ')
            if not authors or authors[0]=='':
                # Some pages render the author as a link instead of plain text.
                return div.xpath('.//div[@class="author"]/a/text()').extract()[0]
            return ';'.join(authors)
        except IndexError:
            try:
                return div.xpath('.//div[@class="author"]/a/text()').extract()[0]
            except IndexError:
                return '佚名'  # "anonymous"

    def parseContent(self, response):
        """Parse one news article; emit it only when it was published today."""
        # The div holding both the header block and the article body.
        div=response.xpath('//div[@class="newsContent"]')[0]
        title=div.xpath('.//h1/text()').extract()[0]
        # Timestamp text; date digits sit at fixed offsets ('YYYY年MM月DD日 HH:MM').
        timeString=div.xpath('.//div[@class="time"]/text()').extract()[0]
        date = '{}-{}-{}'.format(timeString[:4], timeString[5:7], timeString[8:10])
        if date!=today:
            return
        TIME=timeString[-5:]  # clock time; upper-case to avoid clashing with `time`
        author = self._parse_author(div)
        # Source: the organisation this report belongs to.
        try:
            source=div.xpath('.//div[@class="source data-source"]/@data-source').extract()[0]
        except IndexError:
            source='佚'
        # Article body: concatenate all paragraph text nodes.
        content = ''.join(div.xpath('.//p/text()').extract())
        yield {
            'ads_title':title,
            'ads_date':date,
            'ads_time':TIME,
            'ads_author':author,
            'ads_source':source,
            'ads_content':content,
            'type':'ads_news',
            }
