#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import time
import datetime
import requests
import re
import pymysql
from bs4 import BeautifulSoup
import logging
from pathlib import Path
import pandas as pd
import json
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import platform
from requests.adapters import HTTPAdapter
# Shared HTTP session with up-to-3 retries per request.  Currently unused
# (the spider fetches through selenium); kept for the commented-out
# requests-based path in crawl().
s = requests.Session()
s.mount('http://', HTTPAdapter(max_retries=3))
s.mount('https://', HTTPAdapter(max_retries=3))

import sys, os
# Make the sibling ark_project package importable so the shared MySQL
# settings module can be loaded.
path = os.path.abspath('../..')+'/ark_project'
sys.path.append(path)  # appended to the end of sys.path
from settings import mysql_settings
settings=mysql_settings.mysqlSettings()
settings=settings.mysql_conf
# MySQL connection parameters pulled from the shared project settings.
MYSQL_HOST=settings['MYSQL_HOST']
MYSQL_PORT=settings['MYSQL_PORT']
MYSQL_DB=settings['MYSQL_DB']
MYSQL_USER=settings['MYSQL_USER']
MYSQL_PASSWD=settings['MYSQL_PASSWD']
MYSQL_CHARTSET=settings['MYSQL_CHARTSET']

class ark_fundflow_spider():
    """Spider that collects ARK ETF fund-flow (cash in/out), fund-value and
    closing-price data, stores it in MySQL and backs it up to CSV files."""

    def __init__(self,is_crawl=True,log_rank=logging.WARNING):
        """
        :param is_crawl: when True fetch fresh pages; when False reuse the
                         HTML/JSON snapshots archived earlier the same day.
        :param log_rank: logging level for this spider's logger.
        :raises RuntimeError: on platforms with no configured chromedriver.
        """
        self.my_user_agent = mysql_settings.MyUserAgent().my_user_agent
        self.curr_path = os.getcwd() + '/' + 'ark_fundflow/'
        self.is_crawl=is_crawl
        self.log_rank=log_rank
        self.start_date_str = str(time.strftime('%Y-%m-%d', time.localtime()))
        self.headers = {
            "User-Agent": self.my_user_agent,
            'Connection': 'close'}
        self.logger=self.log_process()

        # Tickers of the ARK funds tracked by this spider.
        self.ark_list = ['arkf', 'arkg', 'arkk', 'arkq', 'arkw', 'arkx', 'prnt', 'izrl']

        # Directory holding today's raw HTML/JSON snapshots.
        if not os.path.exists(self.curr_path+self.start_date_str): os.makedirs(self.curr_path+self.start_date_str)
        self.ark_fundflow_html_path = self.curr_path+self.start_date_str + '/'

        # Directory holding the CSV backups.
        if not os.path.exists(self.curr_path+'fundflow_csv'): os.makedirs(self.curr_path+'fundflow_csv')
        self.ark_fundflow_csv_path = self.curr_path+'fundflow_csv' + '/'

        # date string -> per-fund record; filled in by the crawl methods.
        self.fundflow_item_dicts = {}
        # Template record for a single trading day ('None' marks a missing value).
        self.per_day_data = {
            'arkf_cashinout': 'None', 'arkg_cashinout': 'None', 'arkk_cashinout': 'None', 'arkq_cashinout': 'None',
            'arkw_cashinout': 'None', 'arkx_cashinout': 'None', 'prnt_cashinout': 'None', 'izrl_cashinout': 'None',
            'arkf_close': 'None','arkg_close': 'None','arkk_close': 'None','arkq_close': 'None',
            'arkw_close': 'None','arkx_close': 'None','prnt_close': 'None','izrl_close': 'None',
        }

        self.connect = pymysql.connect(
            host=MYSQL_HOST,
            port=MYSQL_PORT,
            database=MYSQL_DB,
            user=MYSQL_USER,
            password=MYSQL_PASSWD,
            charset=MYSQL_CHARTSET
        )
        # Cursor used by every SQL statement in this class.
        self.cursor = self.connect.cursor()

        self.chrome_options = Options()  # headless-browser configuration
        self.chrome_options.add_argument('start-maximized')  # fixed browser resolution
        self.chrome_options.add_argument('--disable-gpu')  # works around a Chrome headless bug
        self.chrome_options.add_argument('--hide-scrollbars')  # hide scrollbars on odd pages
        self.chrome_options.add_argument('blink-settings=imagesEnabled=false')  # skip image loading for speed
        self.chrome_options.add_argument('-headless')  # no visible window
        self.chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])
        self.chrome_options.add_experimental_option('useAutomationExtension', False)
        self.chrome_options.add_argument('--ignore-certificate-errors')
        self.chrome_options.add_argument(
            "user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36")

        if platform.system().lower() == 'windows':
            # Raw string: the old non-raw 'C:\Program Files...' literal only
            # worked because \P, \G, \A, \c are not escape sequences (and it
            # emitted invalid-escape warnings on modern Pythons).
            self.driver = webdriver.Chrome(chrome_options=self.chrome_options,
                                           executable_path=r'C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe')
        elif platform.system().lower() == 'linux':
            self.driver = webdriver.Chrome(chrome_options=self.chrome_options,
                                           executable_path='/usr/local/bin/chromedriver')
        else:
            # Fail fast with a clear message instead of an AttributeError on
            # self.driver the first time a crawl method runs.
            raise RuntimeError('no chromedriver configured for platform: %s' % platform.system())

    def new_run(self):
        """Main entry point: sync dates, crawl every fund's cashinout /
        fundvalue / close data, back the results up to CSV, then shut the
        browser down."""
        self.update_date()
        for ark_type in self.ark_list:
            self.crawl_cashinout(ark_type)
            self.caculate_fundvalue(ark_type)
            self.crawl_close(ark_type)

        if self.fundflow_item_dicts:
            # Name the CSV backup after the most recent date collected.
            # (strptime already yields datetime objects, so the old
            # isinstance(d, datetime.date) filter inside max() was a no-op.)
            parsed_dates = [datetime.datetime.strptime(d, '%Y-%m-%d') for d in self.fundflow_item_dicts.keys()]
            latest_date_str = max(parsed_dates).strftime('%Y-%m-%d')

            csv_file_name = self.ark_fundflow_csv_path + 'ark_fundflow_' + latest_date_str + '.csv'
            self.save_data(self.fundflow_item_dicts, csv_file_name)

        # quit() rather than close(): close() only closes the window and
        # leaves the chromedriver process running.
        self.driver.quit()

    def crawl(self,url,ark_type,web):
        """Fetch *url* with the headless browser and archive its page source.

        To avoid hammering the site, the page is fetched at most once per
        day: if today's archive file already exists, nothing is done.
        """
        archive_file = self.ark_fundflow_html_path + web+"_"+ark_type + '_web_html.txt'
        if os.path.exists(archive_file):
            return

        self.driver.implicitly_wait(10)  # implicit wait, up to 10 seconds
        self.driver.get(url)
        page_source = self.driver.page_source
        with open(archive_file, 'w', encoding='utf-8') as f:
            f.write(page_source)
        self.logger.info(u'=====%s抓取当日更新数据成功====', ark_type)
        # pause 5 seconds between crawls
        time.sleep(5)

    def log_process(self):
        if not os.path.exists(self.curr_path+'logs'): os.makedirs(self.curr_path+'logs')

        log_path=self.curr_path+'logs/' + Path(__file__).name.replace('.py', '') + '_%s.log' % self.start_date_str
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)  # or whatever
        handler = logging.FileHandler(log_path, 'w', 'utf-8')  # or whatever
        handler.setFormatter(logging.Formatter('%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'))  # or whatever
        logger.addHandler(handler)

        return logger

    @staticmethod
    def get_list(date):
        return datetime.datetime.strptime(date, "%Y-%m-%d").timestamp()

    def update_date(self):
        """Sync dates between ark_holdings and ark_fundvaluecashintout.

        Reads recent dates from ark_holdings, INSERTs any date that is
        missing from ark_fundvaluecashintout, and seeds
        self.fundflow_item_dicts with one fresh record per date in scope.
        """
        # Latest 7 distinct dates present in ark_holdings.
        sql_get_holedate = "SELECT DISTINCT date from ark_holdings ORDER BY date Desc LIMIT 7;"
        self.cursor.execute(sql_get_holedate)
        self.ark_holdings_date=self.cursor.fetchall()
        self.ark_holdings_date=[t[0] for t in self.ark_holdings_date]
        # Keep only the 4th-7th most recent ark_holdings dates.
        self.ark_holdings_date =self.ark_holdings_date[3:]
        self.logger.info('==self.ark_holdings_date=%s',str(self.ark_holdings_date))

        # Latest 7 distinct dates already present in ark_fundvaluecashintout.
        sql_get_fundvalue_date="SELECT DISTINCT date from ark_fundvaluecashintout ORDER BY date Desc LIMIT 7;"
        self.cursor.execute(sql_get_fundvalue_date)
        self.ark_fundvalue_date=self.cursor.fetchall()
        self.ark_fundvalue_date=[t[0] for t in self.ark_fundvalue_date]

        self.logger.info('==self.ark_fundvalue_date:%s',str(self.ark_fundvalue_date))

        ark_holdings_date_set=set(self.ark_holdings_date)
        ark_fundvalue_date_set=set(self.ark_fundvalue_date)

        # Dates in ark_holdings that ark_fundvaluecashintout lacks.
        self.ark_holdings_date_alone=ark_holdings_date_set-ark_fundvalue_date_set
        self.ark_holdings_date_alone = list(self.ark_holdings_date_alone)
        # Sort chronologically.
        self.ark_holdings_date_alone = sorted(self.ark_holdings_date_alone, key=lambda date: self.get_list(date))
        self.logger.info('==self.ark_holdings_date_alone=%s',self.ark_holdings_date_alone)
        for new_date in self.ark_holdings_date_alone:
            try:
                sql = "insert into ark_fundvaluecashintout(date) values(%s)"
                # Proper 1-tuple of parameters ((new_date) was just a string).
                self.cursor.execute(sql, (new_date,))
                self.connect.commit()
            except Exception as e:
                self.logger.error(e)
                self.logger.warning('===ark_fundvaluecashintout新增日期数据%s失败===',new_date)

        # Every date touched in this run; these dates get backed up to CSV.
        self.altogether_date=list(set(self.ark_holdings_date+self.ark_fundvalue_date))
        # Sort chronologically.
        self.altogether_date = sorted(self.altogether_date, key=lambda date: self.get_list(date))
        for per in self.altogether_date:
            # COPY the template.  The original assigned self.per_day_data
            # itself, so every date shared ONE dict: each later per-date
            # update clobbered all dates (and mutated the template too).
            self.fundflow_item_dicts[per] = dict(self.per_day_data)

    def crawl_cashinout(self,ark_type):
        """
        Fund flow (cashinout) scraped from etfdb.com, dates newest-first.

        For each date already present in ark_fundvaluecashintout: if the
        stored value is NULL and the scrape produced data for that date,
        UPDATE the row (and the combined total); other dates are skipped.
        """
        web='etfdb'
        url = 'https://etfdb.com/etf/' + ark_type

        # Only crawl when enabled; otherwise rely on the archived snapshot.
        if self.is_crawl:
            self.crawl(url, ark_type,web)

        # Read the locally archived page source.
        with open(self.ark_fundflow_html_path + web+"_"+ark_type + '_web_html.txt', 'r', encoding='utf-8') as f:
            html = f.read()
            soup = BeautifulSoup(html, 'lxml')  # NOTE(review): parsed but never used below

            # Regex: '[<13-digit ms timestamp>, <value>]' pairs embedded in the page.
            pattern = re.compile('\[\d{13}.*?\]', re.S)
            fundflow_str = pattern.findall(html)
            # fundflow_str looks like ['[1414972800000, 0.0]', '[1415059200000, 0.0]', ...]

            # Parse the scraped date / fund-flow pairs, then persist to MySQL.

            date_fundflow_dict={}# str 'YYYY-MM-DD' -> float fund flow
            for oneday_fundflow_str in fundflow_str:
                # Convert the millisecond timestamp (chars 1-13) to 'YYYY-MM-DD'.
                date = time.strftime("%Y-%m-%d", time.localtime(int(oneday_fundflow_str[1:14]) / 1000))
                # Convert the value to float and rescale to USD.
                # NOTE(review): the original comment said the source unit is
                # million USD, yet the factor is 1e9 — confirm the true unit.
                fundflow = float(oneday_fundflow_str[16:-1]) * 1000000000
                date_fundflow_dict[date]=fundflow

            for new_date in self.ark_fundvalue_date:
                # NOTE(review): SQL assembled by string interpolation; safe only
                # while ark_type/new_date come from internal lists and the DB.
                sql_check="select %s from ark_fundvaluecashintout where date='%s'"%(ark_type + "_cashinout",new_date)
                self.cursor.execute(sql_check)
                date_fundflow=self.cursor.fetchone()[0]
                if date_fundflow is None and new_date in date_fundflow_dict.keys():
                    new_date_fundflow=date_fundflow_dict[new_date]
                    sql = "UPDATE ark_fundvaluecashintout set " + ark_type + "_cashinout=" + str(
                        new_date_fundflow) + " where date=\'" + new_date + "\'"
                    try:
                        # Run the UPDATE statement.
                        self.cursor.execute(sql)
                        # Commit the change.
                        self.connect.commit()
                        self.logger.info('%s_cashinout数据更新成功！！！',ark_type)
                        self.fundflow_item_dicts[new_date][ark_type + "_cashinout"] = new_date_fundflow  # mirror the DB change into the CSV-backup dict
                    except Exception as e:
                        self.logger.error('%s_cashinout数据更新失败...',ark_type)
                        self.logger.error(e)
                        # Roll back on failure.
                        self.connect.rollback()

                    # Recompute the combined cash in/out total (NULLs count as 0).
                    sql = "UPDATE ark_fundvaluecashintout set combined_cashinout=if(arkf_cashinout is null,0,arkf_cashinout)" \
                          "+if(arkg_cashinout is null,0,arkg_cashinout)" \
                          "+if(arkk_cashinout is null,0,arkk_cashinout)" \
                          "+if(arkq_cashinout is null,0,arkq_cashinout)" \
                          "+if(arkw_cashinout is null,0,arkw_cashinout)" \
                          "+if(arkx_cashinout is null,0,arkx_cashinout)" \
                          "+if(prnt_cashinout is null,0,prnt_cashinout)" \
                          "+if(izrl_cashinout is null,0,izrl_cashinout)" \
                          "where date=\'" + new_date + "\'"
                    try:
                        # Run the UPDATE statement.
                        self.cursor.execute(sql)
                        # Commit the change.
                        self.connect.commit()
                        self.logger.info('cashinout_combined数据更新成功！！！')
                        self.cursor.execute(
                            "select combined_cashinout from ark_fundvaluecashintout where date=\'" + new_date + "\'")
                        combined_cashinout = self.cursor.fetchone()[0]
                        self.fundflow_item_dicts[new_date]['combined_cashinout'] = combined_cashinout
                    except Exception as e:
                        self.logger.error('cashinout_combined数据更新失败...')
                        self.logger.error(e)
                        # Roll back on failure.
                        self.connect.rollback()

            # Also back up every scraped cashinout value for the dates in scope.
            for per_date in self.altogether_date:
                if per_date in date_fundflow_dict.keys():
                    self.fundflow_item_dicts[per_date][ark_type+'_cashinout']=date_fundflow_dict[per_date]


    def caculate_fundvalue(self,ark_type):
        """
        Fund value, computed from the ark_holdings table, dates newest-first.

        For each date already present in ark_fundvaluecashintout: if the
        stored value is NULL, compute it as the sum of that fund's holdings
        and UPDATE the row (and the combined total); other dates are skipped.
        """
        for each_date in self.ark_fundvalue_date:
            sql_check="select %s from ark_fundvaluecashintout where date='%s'"%(ark_type + "_fundvalue",each_date)
            self.cursor.execute(sql_check)
            fundvalue=self.cursor.fetchone()[0]
            if fundvalue is None:
                # Sum the USD value of every holding of this fund on this date.
                sql = "select sum(value_usd) from (select * from ark_holdings where date=\'" + each_date + "\' and fund=\'" + ark_type.upper() + "\')t"
                try:
                    self.cursor.execute(sql)
                    sumcddatefundvaluenum = self.cursor.fetchone()[0]

                    # Write the computed fund value for this ETF and date.
                    sql = "UPDATE ark_fundvaluecashintout set " + ark_type + "_fundvalue=" + str(
                        sumcddatefundvaluenum) + " where date=\'" + each_date + "\'"
                    try:
                        self.cursor.execute(sql)
                        self.connect.commit()
                        self.logger.info('%s_fundvalue更新成功',ark_type)
                        # Back up the freshly computed value.  The original
                        # stored `fundvalue`, which is provably None in this
                        # branch (guarded by `if fundvalue is None` above).
                        self.fundflow_item_dicts[each_date][ark_type + "_fundvalue"] = sumcddatefundvaluenum
                    except Exception as e:
                        # Roll back on failure.
                        self.connect.rollback()
                        self.logger.error('%s_fundvalue更新失败',ark_type)
                        self.logger.error(e)
                        self.logger.error(sql)
                except Exception as e:
                    self.logger.error("Error: unable to fetch data")
                    self.logger.error(e)

                # Recompute the combined fund value across all ETFs (NULLs count as 0).
                sql = "UPDATE ark_fundvaluecashintout set combined_fundvalue=if(arkf_fundvalue is null,0,arkf_fundvalue)" \
                      "+if(arkg_fundvalue is null,0,arkg_fundvalue)" \
                      "+if(arkk_fundvalue is null,0,arkk_fundvalue)" \
                      "+if(arkq_fundvalue is null,0,arkq_fundvalue)" \
                      "+if(arkw_fundvalue is null,0,arkw_fundvalue)" \
                      "+if(arkx_fundvalue is null,0,arkx_fundvalue)" \
                      "+if(prnt_fundvalue is null,0,prnt_fundvalue)" \
                      "+if(izrl_fundvalue is null,0,izrl_fundvalue)" \
                      "where date=\'" + each_date + "\'"
                try:
                    self.cursor.execute(sql)
                    self.connect.commit()
                    self.logger.info('combined_fundvalue更新成功！！！')
                    # Back up the combined value under its own key, matching
                    # crawl_cashinout's handling of combined_cashinout.  The
                    # original re-wrote the per-ETF key with a None value here.
                    self.cursor.execute(
                        "select combined_fundvalue from ark_fundvaluecashintout where date=\'" + each_date + "\'")
                    combined_fundvalue = self.cursor.fetchone()[0]
                    self.fundflow_item_dicts[each_date]['combined_fundvalue'] = combined_fundvalue
                except Exception as e:
                    # Roll back on failure.
                    self.connect.rollback()
                    self.logger.error('combined_fundvalue更新失败...')
                    self.logger.error(e)

    def _nasdaq_url(self, ark_type, to_date):
        """Build the nasdaq historical-quotes API URL (ARKX is listed under
        assetclass=stocks, every other fund under assetclass=etf)."""
        if ark_type == 'arkx':
            return 'https://api.nasdaq.com/api/quote/ARKX/historical?assetclass=stocks&fromdate=2021-05-03&limit=18&todate=' + to_date
        return 'https://api.nasdaq.com/api/quote/' + ark_type.upper() + '/historical?assetclass=etf&fromdate=2021-05-03&limit=18&todate=' + to_date

    def _fetch_nasdaq_json(self, url):
        """Load *url* in the headless browser and return the raw JSON text
        (the API response is rendered inside a <pre> tag)."""
        self.driver.implicitly_wait(10)  # implicit wait, up to 10 seconds
        self.driver.get(url)
        return self.driver.find_element_by_tag_name('pre').text

    def crawl_nasdaq(self,ark_type,json_file_path):
        """Fetch historical quotes for *ark_type* from the nasdaq API and
        archive the JSON to *json_file_path*.

        Tries a URL ending at today's date first; if the API returns no data
        (e.g. before today's session exists), retries with yesterday's date.
        The duplicated today/yesterday code paths of the original are now
        shared via the two helpers above.
        """
        today_date = str(datetime.datetime.today().date())
        try:
            datas = self._fetch_nasdaq_json(self._nasdaq_url(ark_type, today_date))
            json_data = json.loads(datas)
            if json_data['data'] is not None:
                with open(json_file_path, 'w', encoding='utf-8') as f:
                    f.write(datas)
                self.logger.info('=====%s抓取%s更新数据成功====', ark_type, today_date)
            else:
                # The request keyed on today's date returned no data; retry
                # with yesterday's date.
                yesterday_str = str((datetime.datetime.today() - datetime.timedelta(days=1)).date())
                try:
                    datas = self._fetch_nasdaq_json(self._nasdaq_url(ark_type, yesterday_str))
                    json_data = json.loads(datas)
                    if json_data['data'] is not None:
                        with open(json_file_path, 'w', encoding='utf-8') as f:
                            f.write(datas)
                        self.logger.info('=====%s抓取%s更新数据成功====', ark_type, yesterday_str)
                except Exception as e:
                    self.logger.error(e)
                    self.logger.error('===%s %s nasdaq数据抓取失败', ark_type, yesterday_str)
        except Exception as e:
            self.logger.error(e)
            self.logger.error('===%s %s nasdaq数据抓取失败',ark_type,today_date)

    def crawl_close(self,ark_type):
        """
        Closing price, scraped from nasdaq's historical-quotes API, dates
        newest-first.

        For each date already present in ark_fundvaluecashintout: if the
        stored close is NULL and the scrape produced data for that date,
        UPDATE the row plus the fund-value-weighted combined close; other
        dates are skipped.
        https://www.nasdaq.com/market-activity/funds-and-etfs/arkk/historical
        https://api.nasdaq.com/api/quote/watchlist?symbol=arkx%7cstocks&symbol=arkf%7cetf&symbol=arkw%7cetf&symbol=izrl%7cetf&symbol=prnt%7cetf&symbol=arkq%7cetf&symbol=arkg%7cetf&symbol=arkk%7cetf&type=Rv
        """
        web='nasdaq'
        today_date=str(datetime.datetime.today().date())
        json_file_path=self.ark_fundflow_html_path + web + "_" + ark_type + '_web_json.txt'
        if self.is_crawl:
            self.crawl_nasdaq(ark_type,json_file_path)

        if os.path.exists(json_file_path):
            # NOTE(review): file handle is never closed explicitly (no `with`).
            fin_data=open(json_file_path,'r',encoding='utf-8').read()
            self.logger.info('===从文件中读取nasdaq的json数据 %s %s成功===',ark_type,today_date)

            json_data = json.loads(fin_data)
            row_data = json_data['data']['tradesTable']['rows']
            # 'MM/DD/YYYY' API rows -> {'YYYY-MM-DD': close-price string}
            date_closevalue_dict = {}
            for each_data in row_data:
                date=each_data['date']
                new_date = str(datetime.datetime.strptime(date, '%m/%d/%Y').date())
                close_value=each_data['close'].replace('$','')  # strip the currency sign
                date_closevalue_dict[new_date] = close_value

            for each_date in self.ark_fundvalue_date:
                # NOTE(review): SQL assembled by string interpolation; safe only
                # while inputs come from internal lists and the DB itself.
                sql_check="select %s from ark_fundvaluecashintout where date='%s'"%(ark_type+'_close',each_date)
                self.cursor.execute(sql_check)
                select_check_res=self.cursor.fetchone()[0]
                if each_date in date_closevalue_dict.keys() and select_check_res is None:
                    each_date_close_value=date_closevalue_dict[each_date]
                    sql_update="UPDATE ark_fundvaluecashintout set %s=%s where date='%s';"%(ark_type+'_close',each_date_close_value,each_date)
                    self.cursor.execute(sql_update)
                    self.connect.commit()
                    self.logger.info('==%s:%s_close->%s数据更新成功',each_date,ark_type,each_date_close_value)

                    # Recompute combined_close as the fund-value-weighted
                    # average of all ETF closes (NULLs count as 0).
                    sql_close_combined = """update ark_fundvaluecashintout set combined_close=(if(arkf_close is null,0,arkf_close)*if(arkf_fundvalue is null,0,arkf_fundvalue)
                                                    +if(arkg_close is null,0,arkg_close)*if(arkg_fundvalue is null,0,arkg_fundvalue)
                                                    +if(arkk_close is null,0,arkk_close)*if(arkk_fundvalue is null,0,arkk_fundvalue)
                                                    +if(arkq_close is null,0,arkq_close)*if(arkq_fundvalue is null,0,arkq_fundvalue)
                                                    +if(arkw_close is null,0,arkw_close)*if(arkw_fundvalue is null,0,arkw_fundvalue)
                                                    +if(arkx_close is null,0,arkx_close)*if(arkx_fundvalue is null,0,arkx_fundvalue)
                                                    +if(prnt_close is null,0,prnt_close)*if(prnt_fundvalue is null,0,prnt_fundvalue)
                                                    +if(izrl_close is null,0,izrl_close)*if(izrl_fundvalue is null,0,izrl_fundvalue))
                                                    /(
                                                        if(arkf_fundvalue is null,0,arkf_fundvalue)
                                                        +if(arkg_fundvalue is null,0,arkg_fundvalue)
                                                        +if(arkk_fundvalue is null,0,arkk_fundvalue)
                                                        +if(arkq_fundvalue is null,0,arkq_fundvalue)
                                                        +if(arkw_fundvalue is null,0,arkw_fundvalue)
                                                        +if(arkx_fundvalue is null,0,arkx_fundvalue)
                                                        +if(prnt_fundvalue is null,0,prnt_fundvalue)
                                                        +if(izrl_fundvalue is null,0,izrl_fundvalue)) where date='%(date)s';
                                """%dict(date=each_date)
                    try:
                        # Run the UPDATE statement.
                        self.cursor.execute(sql_close_combined)
                        # Commit the change.
                        self.connect.commit()
                        self.logger.info('close_combined数据更新成功！！！')
                        self.fundflow_item_dicts[each_date][ark_type+'_close'] = each_date_close_value

                        sql = "select combined_close from ark_fundvaluecashintout where date='%s'" % each_date
                        self.cursor.execute(sql)
                        combined_close = self.cursor.fetchone()[0]
                        self.fundflow_item_dicts[each_date]['combined_close'] = combined_close
                    except Exception as e:
                        self.logger.error('close_combined数据更新失败...')
                        self.logger.error(e)
                        # Roll back on failure.
                        self.connect.rollback()

            # Also back up every scraped close for the dates in scope.
            for per_date in self.altogether_date:
                if per_date in date_closevalue_dict.keys():
                    self.fundflow_item_dicts[per_date][ark_type+'_close']=date_closevalue_dict[per_date]
        else:
            self.logger.error('===nasdaq的json数据备份文件中不存在 %s %s 的备份===',ark_type,today_date)

    def save_data(self,fundflow_item_dicts,csv_file_name):
        columm_order=list(self.per_day_data.keys())
        print('columm_order=',columm_order)
        print('fundflow_item_dicts=',fundflow_item_dicts)
        df=pd.DataFrame(fundflow_item_dicts).T
        #保持字段顺序
        df[columm_order].to_csv(csv_file_name)

    def __del__(self):
        """Close the DB cursor and connection if they were ever created.

        Guarded with getattr: if __init__ failed before reaching the MySQL
        setup (e.g. DB unreachable), the original version raised a second
        AttributeError during garbage collection.
        """
        cursor = getattr(self, 'cursor', None)
        if cursor is not None:
            try:
                cursor.close()
            except Exception:
                pass  # best effort during interpreter shutdown
        connect = getattr(self, 'connect', None)
        if connect is not None:
            try:
                connect.close()
            except Exception:
                pass  # best effort during interpreter shutdown

if __name__=='__main__':
    # Manual run: reuse today's archived snapshots (is_crawl=False) and log at INFO.
    fundflow_spider=ark_fundflow_spider(is_crawl=False,log_rank=logging.INFO)
    fundflow_spider.new_run()




