# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import numpy as np
import pandas as pd
import datetime
# Date stamp (YYYY-MM-DD) appended to every exported CSV filename.
today=datetime.datetime.now().strftime('%Y-%m-%d')

# Module-level scratch list: valueNull() appends any raw value it could not
# parse; the list is printed at the end of close_spider for inspection.
a=[]

def valueNull(x):
  """Normalize a scraped value into a float.

  Supported inputs:
    * float      -> returned unchanged (e.g. NaN from pandas)
    * '12.5%'    -> 12.5
    * '3次'      -> 3.0             (count suffix)
    * '1.5万'    -> 15000.0         (万 = 1e4)
    * '2亿'      -> 200000000.0     (亿 = 1e8)
    * '1万亿'    -> 1e12            (万亿 = 1e12)
    * '1,234'    -> 1234.0          (thousands separators stripped)
    * '--'       -> 0.000001        (placeholder; tiny epsilon, not 0)

  Anything unparseable is appended to the module-level list `a`
  for later inspection and None is returned.
  """
  # Already numeric (typically NaN coming from a pandas read): pass through.
  # isinstance also accepts numpy float subclasses, unlike the old
  # str(type(x)) comparison.
  if isinstance(x, float):
    return x
  # Guard: the suffix checks below would raise IndexError/TypeError on
  # '' or None, which the old code did not handle.
  if not x:
    a.append(x)
    return None
  # '--' is the site's "no data" placeholder; return a tiny epsilon so
  # downstream ratios never divide by zero.
  if x == '--':
    return 0.000001
  if x[-1] == '%':
    return float(x.replace(',', '')[:-1])
  if x[-1] == '次':
    # Strip thousands separators here too (the old branch forgot them
    # and crashed on values like '1,200次').
    return float(x.replace(',', '')[:-1])
  # NOTE: the two-character suffix '万亿' must be tested before '万'/'亿'.
  if x[-2:] == '万亿':
    # Keep the original factor chain (1e8 * 1e4) bit-for-bit.
    return float(x.replace(',', '')[:-2]) * 100000000 * 10000
  if x[-1] == '万':
    return float(x.replace(',', '')[:-1]) * 10000
  if x[-1] == '亿':
    try:
      return float(x.replace(',', '')[:-1]) * 100000000
    except ValueError:
      print(x)
      a.append(x)
      return None
  if x[-1] in '0123456789':
    return float(x.replace(',', ''))
  # Unrecognized format: record it and return None explicitly.
  a.append(x)
  return None

class StockAnalistPipeline(object):
    """Scrapy item pipeline: buffers items by their 'type' field in memory
    and writes one CSV per non-empty buffer when the spider closes.
    """

    # Item keys carrying identifiers/metadata rather than numeric values,
    # so they must never be run through valueNull().
    META_KEYS = ('date', 'code', 'name', 's_code', 's_name', 'type')

    def open_spider(self, spider):
      print('开启爬虫')
      # One in-memory buffer per item type; flushed to CSV in close_spider.
      self.line_jszb = []
      self.line_zjtj1 = []
      self.ads_jszb = []
      self.ads_zyzb = []
      self.ads_dbfx = []
      self.ads_zjtj1 = []
      self.ads_news = []
      # Running count of 'ads_jszb' items, printed as crude progress output.
      self.i = 0

    def process_item(self, item, spider):
      """Route an item into the buffer matching its 'type'.

      Numeric-looking fields of 'ads_zyzb' / 'ads_dbfx' items are
      normalized with valueNull() first. The item is always returned so
      any later pipeline stage still receives it (the old code returned
      None for unknown types).
      """
      itype = item.get('type')
      if itype == 'ads_jszb':
        self.i += 1
        print(self.i)  # crude progress indicator
        self.ads_jszb.append(item)
      elif itype in ('ads_zyzb', 'ads_dbfx'):
        # Normalize every value field; metadata keys stay untouched.
        for key in item:
          if key not in self.META_KEYS:
            item[key] = valueNull(item[key])
        getattr(self, itype).append(item)
      elif itype in ('line_jszb', 'line_zjtj1', 'ads_zjtj1', 'ads_news'):
        getattr(self, itype).append(item)
      # Unknown types pass through unmodified.
      return item

    def close_spider(self, spider):
      """Dump every non-empty buffer to ./datas/<name>_<today>.csv and
      touch ./datas/update.txt as a freshness marker when anything was
      written. Finally report the values valueNull() failed to parse.
      """
      print('爬虫结束函数')
      print('ads_news的长度:', len(self.ads_news))
      # Make sure the output directory exists before writing anything.
      import os
      os.makedirs('./datas', exist_ok=True)
      buffers = (
        ('line_jszb', self.line_jszb),
        ('line_zjtj1', self.line_zjtj1),
        ('ads_jszb', self.ads_jszb),
        ('ads_zyzb', self.ads_zyzb),
        ('ads_dbfx', self.ads_dbfx),
        ('ads_zjtj1', self.ads_zjtj1),
        ('ads_news', self.ads_news),
      )
      wrote_any = False
      for stem, rows in buffers:
        if rows:
          wrote_any = True
          # header=None / index=None keep the original raw-dump format;
          # utf_8_sig adds a BOM so Excel opens the Chinese text correctly.
          pd.DataFrame(rows).to_csv(
              './datas/' + stem + '_' + today + '.csv',
              encoding='utf_8_sig', header=None, index=None)
      if wrote_any:
        # Marker file so downstream consumers know fresh data exists.
        with open('./datas/update.txt', 'w') as f:
          f.write('1')
      print('未处理的数据有：', a)
    	
