import csv
import datetime
import io
import logging
import os
import time
from pathlib import Path

import pandas as pd
import pymysql
import requests
from requests.adapters import HTTPAdapter

import sys, os
path = os.path.abspath('../..')+'/ark_project'
sys.path.append(path)  # 会追加到列表最尾部
from settings import mysql_settings
settings=mysql_settings.mysqlSettings()
settings=settings.mysql_conf
MYSQL_HOST=settings['MYSQL_HOST']
MYSQL_PORT=settings['MYSQL_PORT']
MYSQL_DB=settings['MYSQL_DB']
MYSQL_USER=settings['MYSQL_USER']
MYSQL_PASSWD=settings['MYSQL_PASSWD']
MYSQL_CHARTSET=settings['MYSQL_CHARTSET']

class ark_webholding_spider():
    """Spider that downloads the daily holdings CSVs of the ARK ETFs from
    ark-funds.com, stores them locally (csv plus a legacy append-only txt)
    and imports them into the MySQL table ``ark_holdings``.

    Parameters
    ----------
    is_crawl : bool
        When True, ``run()`` first downloads fresh holdings from the web;
        when False it only re-imports an already-downloaded local csv.
    log_rank : int
        Logging level for the file logger (e.g. ``logging.WARNING``).

    NOTE(review): the snake_case class name is kept unchanged because
    external callers may import it by this exact name.
    """

    def __init__(self, is_crawl=True, log_rank=logging.WARNING):
        # The crawler's User-Agent string is supplied by the settings module.
        self.my_user_agent = mysql_settings.MyUserAgent().my_user_agent
        self.curr_path = os.getcwd() + '/' + 'ark_webholding/'
        self.is_crawl = is_crawl
        self.log_rank = log_rank
        # Today's date (YYYY-MM-DD), used to name the log file.
        self.start_date_str = str(time.strftime('%Y-%m-%d', time.localtime()))
        self.logger = self.log_process()
        self.arkholding_path = self.curr_path + "/arkholding"
        if not os.path.exists(self.arkholding_path):
            os.makedirs(self.arkholding_path)
        self.ark_holding_html_address = self.arkholding_path + "/"

        # Per-ETF holdings csv download URLs on the ARK web site.
        arkf_url = "https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_FINTECH_INNOVATION_ETF_ARKF_HOLDINGS.csv"
        arkg_url = "https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_GENOMIC_REVOLUTION_MULTISECTOR_ETF_ARKG_HOLDINGS.csv"
        arkk_url = "https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_INNOVATION_ETF_ARKK_HOLDINGS.csv"
        arkq_url = "https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_AUTONOMOUS_TECHNOLOGY_&_ROBOTICS_ETF_ARKQ_HOLDINGS.csv"
        arkw_url = "https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_NEXT_GENERATION_INTERNET_ETF_ARKW_HOLDINGS.csv"
        arkx_url = "https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_SPACE_EXPLORATION_&_INNOVATION_ETF_ARKX_HOLDINGS.csv"
        print3d_url = "https://ark-funds.com/wp-content/fundsiteliterature/csv/THE_3D_PRINTING_ETF_PRNT_HOLDINGS.csv"
        israel_url = "https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_ISRAEL_INNOVATIVE_TECHNOLOGY_ETF_IZRL_HOLDINGS.csv"

        # Attribute dictionary per ETF.  (The old standalone `urls` list was
        # removed: it was unused, listed ARKW twice and omitted ARKX.)
        # FIX: the 'ARKQ' entry previously pointed at the ARKG file name.
        self.ark_etf = [
            {'name': 'ARKF', 'url': arkf_url,
             'file': 'ARK_FINTECH_INNOVATION_ETF_ARKF_HOLDINGS.csv'},
            {'name': 'ARKG', 'url': arkg_url,
             'file': 'ARK_GENOMIC_REVOLUTION_MULTISECTOR_ETF_ARKG_HOLDINGS.csv'},
            {'name': 'ARKK', 'url': arkk_url,
             'file': 'ARK_INNOVATION_ETF_ARKK_HOLDINGS.csv'},
            {'name': 'ARKQ', 'url': arkq_url,
             'file': 'ARK_AUTONOMOUS_TECHNOLOGY_&_ROBOTICS_ETF_ARKQ_HOLDINGS.csv'},
            {'name': 'ARKW', 'url': arkw_url,
             'file': 'ARK_NEXT_GENERATION_INTERNET_ETF_ARKW_HOLDINGS.csv'},
            {'name': 'ARKX', 'url': arkx_url,
             'file': 'ARK_SPACE_EXPLORATION_&_INNOVATION_ETF_ARKX_HOLDINGS.csv'},
            {'name': 'PRNT', 'url': print3d_url,
             'file': 'THE_3D_PRINTING_ETF_PRNT_HOLDINGS.csv'},
            {'name': 'IZRL', 'url': israel_url,
             'file': 'ARK_ISRAEL_INNOVATIVE_TECHNOLOGY_ETF_IZRL_HOLDINGS.csv'},
        ]

        # Ticker fix-ups (exchange suffixes etc.).  Not yet applied anywhere
        # in this class; kept on the instance for future use instead of
        # being a dead local variable.
        self.ticker_dict = {'TREE UW': 'TREE', 'ARCT UQ': 'ARCT', 'TCS LI': 'TCS.IL', 'TAK UN': 'TAK',
                            '6618': '6618.HK', '8473': '8473.T', '3690': '3690.HK', '4689': '4689.T',
                            '6060': '6060.HK', '4477': '4477.T', '9923': '9923.HK', 'ADYEN': 'ADYEY',
                            'KSPI': 'KSPI.IL'}

        self.headers = {
            "User-Agent": self.my_user_agent,
            'Connection': 'close'}

    def log_process(self):
        """Configure file logging (logs/<script>_<date>.log) and return a logger."""
        if not os.path.exists(self.curr_path + 'logs'):
            os.makedirs(self.curr_path + 'logs')
        logging.basicConfig(
            level=self.log_rank,  # minimum level written to the file
            filename=self.curr_path + 'logs/' + Path(__file__).name.replace('.py', '') + '_%s.log' % self.start_date_str,
            filemode='a',  # append mode so runs within the same day accumulate
            format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')
        return logging.getLogger(__name__)

    def run(self):
        """Crawl today's holdings (optional) and import them into MySQL."""
        if self.is_crawl:
            # Download the official holdings and store them locally as
            # Ark_holding_<MM-DD-YYYY>.csv / .txt
            self.getHolding(self.ark_holding_html_address)
        # Read the local csv back and load it into the MySQL table
        # ark_holdings (rows of that date are deleted first, then re-inserted).
        self.holding_to_mysql(self.ark_holding_html_address)

    def getHolding(self, ark_holdingshtmladdress):
        """Download every ETF's holdings csv and persist the merged data.

        Writes two files into *ark_holdingshtmladdress*:
        ``Ark_holding_<MM-DD-YYYY>.csv`` (rewritten, with header) and
        ``Ark_holding_<MM-DD-YYYY>.txt`` (appended, no header; legacy format).
        Also sets the module-level global ``datestr`` (MM-DD-YYYY) that
        ``holding_to_mysql`` uses to locate the file.
        """
        global datestr
        columns = ['date', 'fund', 'company', 'ticker', 'cusip', 'shares',
                   'market value($)', 'weight(%)']
        frames = []
        datestr = None
        for etf in self.ark_etf:
            try:
                # NOTE(review): verify=False disables TLS certificate checks;
                # kept to preserve existing behaviour, but worth revisiting.
                text = requests.get(url=etf['url'], headers=self.headers,
                                    verify=False).content.decode('utf-8')
                df_temp = pd.read_csv(io.StringIO(text)).dropna()
                # '5/12/2021' -> '05-12-2021' (used in the output file names)
                datestr = self.transdatefomt2(df_temp['date'][0])
                # '5/12/2021' -> '2021-05-12' (stored in the data itself)
                df_temp['date'] = df_temp['date'].apply(self.transdatefomt)
                frames.append(df_temp)
                # Be polite to the server: 5 seconds between requests.
                time.sleep(5)
            except Exception as e:
                self.logger.error('==%s holding web 下载失败%s', etf['name'], etf['url'])
                self.logger.error(e)

        if datestr is None:
            # FIX: every download failed.  Previously this fell through and
            # raised NameError on the undefined `datestr`.
            self.logger.error('all ETF downloads failed; no holding files written')
            return

        # FIX: DataFrame.append() was removed in pandas 2.x; use pd.concat.
        df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame(columns=columns)
        # txt copy is append-only without header (legacy), csv is rewritten.
        df.to_csv(ark_holdingshtmladdress + 'Ark_holding_' + datestr + '.txt',
                  header=None, index=False, sep=',', mode='a')
        df.to_csv(ark_holdingshtmladdress + 'Ark_holding_' + datestr + '.csv', index=False)
        self.logger.info('输入到ark_holdings csv和txt 完成')

    def holding_to_mysql(self, ark_holdingshtmladdress):
        """Load Ark_holding_<datestr>.csv into the MySQL table ark_holdings.

        Rows of that date are deleted first, then the csv rows are inserted
        one by one (commit per row, rollback on individual failures).
        """
        global datestr
        # FIX: `datestr` is only set by getHolding(); when is_crawl=False (or
        # the crawl failed) fall back to today's date instead of NameError.
        if 'datestr' not in globals() or datestr is None:
            datestr = time.strftime('%m-%d-%Y', time.localtime())

        db = pymysql.connect(
            host=MYSQL_HOST,
            port=MYSQL_PORT,
            database=MYSQL_DB,
            user=MYSQL_USER,
            password=MYSQL_PASSWD,
            charset=MYSQL_CHARTSET)
        cursor = db.cursor()

        # Delete any rows already stored for this date so the import is
        # idempotent.  FIX: parameterized query instead of %-formatting.
        # transdatefomt3: '05-14-2021' -> '2021-05-14' (db date format).
        try:
            cursor.execute("DELETE FROM ark_holdings WHERE date = %s",
                           (self.transdatefomt3(datestr),))
            db.commit()
            self.logger.info('===删除需要更新那日的数据，再更新====删除成功====')
        except Exception as e:
            db.rollback()
            self.logger.info(e)
            self.logger.info('===待更新那日数据没有，无需删除====')

        insert_sql = ("insert into ark_holdings(date,fund,company,ticker,cusip,"
                      "shares,value_usd,wgt_pct) values(%s,%s,%s,%s,%s,%s,%s,%s)")
        csv_path = ark_holdingshtmladdress + 'Ark_holding_' + datestr + '.csv'
        # FIX: csv.reader correctly handles quoted fields (company names that
        # contain commas), which the previous naive line.split(',') mis-parsed;
        # `with` guarantees the file handle is closed.
        with open(csv_path, 'r', encoding='UTF-8', newline='') as f:
            for row in csv.reader(f):
                # Skip blank/short lines and the header row.
                if len(row) < 8 or row[0] == 'date':
                    continue
                param = tuple(row[:8])  # date,fund,company,ticker,cusip,shares,value_usd,wgt_pct
                try:
                    cursor.execute(insert_sql, param)
                    db.commit()
                    self.logger.info('---add a holding data----')
                except Exception as e:
                    db.rollback()
                    self.logger.warning(e)
                    self.logger.warning('===插入holding数据失败====')

        cursor.close()
        db.close()

    @staticmethod
    def transdatefomt(date):
        """Convert '5/14/2021' -> '2021-05-14'."""
        return datetime.datetime.strptime(date, "%m/%d/%Y").strftime('%Y-%m-%d')

    @staticmethod
    def transdatefomt2(date):
        """Convert '5/14/2021' -> '05-14-2021'."""
        return datetime.datetime.strptime(date, "%m/%d/%Y").strftime('%m-%d-%Y')

    @staticmethod
    def transdatefomt3(date):
        """Convert '05-14-2021' -> '2021-05-14'."""
        return datetime.datetime.strptime(date, "%m-%d-%Y").strftime('%Y-%m-%d')

if __name__ == '__main__':
    # Entry point: crawl today's ARK holdings, then load them into MySQL.
    spider = ark_webholding_spider(is_crawl=True, log_rank=logging.INFO)
    spider.run()
