#coding:utf-8
# import re,urllib3
# from bs4 import BeautifulSoup,SoupStrainer
import re
from pandas.io.sql import table_exists
import requests
# import sys
# import os
import time
import numpy as np
import pandas as pd
from lxml import etree 
import pandas as pd
import threading
from threading import Thread
from queue import Queue
from requests.exceptions import ConnectTimeout,ConnectionError,ReadTimeout,SSLError,MissingSchema,ChunkedEncodingError,ContentDecodingError
import pymysql

class f10_sstock:
    """Scrape the top-10 tradable-shareholder (F10) table for one stock from
    quotes.money.163.com.

    Attributes set by __init__:
        count: the stock code string passed in.
        url:   the F10 page URL (code zero-padded to 6 digits).
        f10:   flat list of the 40 table cells, or None when the page did not
               yield the expected table.
        df:    DataFrame with one row per shareholder (empty when f10 is None).
    """
    def __init__(self, count):
        """Fetch and parse the F10 page for stock code *count* (str)."""
        self.count = count
        self.url = "http://quotes.money.163.com/f10/gdfx_%s.html" % self.count.zfill(6)
        print(self.url, "\n")
        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"}
        try:
            # timeout added so a hung connection cannot block a worker thread forever
            response = requests.get(self.url, headers=headers, timeout=30)
        # BUG FIX: `except A or B` evaluates `A or B` first, which is just `A`,
        # so ContentDecodingError was never caught. A tuple catches both.
        except (ConnectionError, ContentDecodingError):
            # transient network problem: wait and retry once
            print(u"网络不稳定 正在重试")
            time.sleep(3)
            response = requests.get(self.url, headers=headers, timeout=30)
        html = etree.HTML(response.text)
        self.f10 = html.xpath('//*[@id="ltdateTable"]/table/tr/td/text()')
        cols = ["股票代码", "十大流通股东", "持有比例", "本期持有股_万股", "持股变动数_万股"]
        if len(self.f10) == 40:
            # 40 cells = 10 shareholders x 4 columns; derive the row count
            # instead of hard-coding 10 so the two stay consistent.
            rows = len(self.f10) // 4
            nn = np.asarray(self.f10).reshape(rows, 4)
            # prepend the stock code as the first column of every row
            nn = np.insert(nn, 0, values=[self.count] * rows, axis=1)
            self.df = pd.DataFrame(nn, dtype="str", columns=cols)
        else:
            # unexpected page layout (suspended stock, layout change, ...)
            self.f10 = None
            self.df = pd.DataFrame(dtype="str", columns=cols)

    def check(self):
        """Return True when the holdings-change column contains at least one
        new entry ("新进") and no reduction ("减持"); else False.
        """
        str_con = ''.join(self.df["持股变动数_万股"].to_list())
        return "减持" not in str_con and "新进" in str_con

def sql_to_df(table_name):
    """Read every row of *table_name* from the local `stock` MySQL database
    and return it as a DataFrame.
    """
    from sqlalchemy import create_engine
    db_engine = create_engine(
        'mysql+pymysql://root:kirin@localhost:3306/stock?charset=utf8')
    # read_sql_query takes the SQL statement and a connection/engine
    query = ''' select * from {}; '''.format(table_name)
    return pd.read_sql_query(query, db_engine)

def f10_into_df(df, index_):
    """Scrape the F10 page for the stock at row *index_* of *df* and store
    the check() verdict in df.loc[index_, 'f10'] (None when the page had no
    usable shareholder table). Returns a short completion message.
    """
    code = df.loc[index_, 'code']
    stock = f10_sstock(code)
    if stock.f10 is None:
        df.loc[index_, 'f10'] = None
    else:
        df.loc[index_, 'f10'] = stock.check()
    return "%s complete" % code


# 先删除之前生成的f10数据表，创建新表，方便用append
def del_old_and_create_table(table_name):
    
    db = pymysql.connect(
            host='localhost',
            db='stock',
            user='root',
            passwd='kirin',
            charset='utf8',
            use_unicode=True)

    # del_table = ["f10"]
    # for i in del_table:
    #     sql = "DROP TABLE IF EXISTS {}".format(i)
    #     try:
    #         cursor.execute(sql)
    #         print('table：{}--已被删除'.format(i))
    #     except Exception as e:
    #         print(e)

    cursor = db.cursor()
    #删除原有的f10数据表
    cursor.execute("DROP TABLE IF EXISTS {}".format(table_name))
    #创建空的f10数据表
    cursor.execute("CREATE TABLE {} (序号 int PRIMARY KEY AUTO_INCREMENT,股票代码 TEXT,十大流通股东 TEXT,持有比例 TEXT,本期持有股_万股 TEXT,持股变动数_万股 TEXT)".format(table_name))



def df2sql(df, index_, table_name):
    """Scrape the F10 table for the stock at row *index_* of *df* and append
    it to the MySQL table *table_name*. Returns a completion message.

    pymysql declares threadsafety level 1 (PEP 249: "Threads may share the
    module, but not connections"), so each worker thread builds its own
    engine/connection here instead of sharing one.
    """
    from sqlalchemy import create_engine
    engine = create_engine(
        'mysql+pymysql://root:kirin@localhost:3306/stock?charset=utf8')
    code = df.loc[index_, 'code']
    stock = f10_sstock(code)
    if stock.f10 is not None:
        # append the freshly scraped rows; table was recreated beforehand
        stock.df.to_sql(table_name, con=engine, if_exists="append", index=False)
    return "{} {} complete".format(index_, code)

#https://www.youtube.com/watch?v=IEEhzQoKtQU

"""
使用for循环执行100个线程，并使用阻塞join
import time
import threading

start = time.perf_counter()

threads=[]
for i in range(100):
    t = threading.Thread(target=f10_into_df,args=(i,))
    t.start()
    print("thread%s is started"%i)
    threads.append(t)

for thread in threads:
    thread.join()
    print("thread%s is joined"%thread)

finish = time.perf_counter()
print(f"Finished in {round(finish-start,2)} second(s)")
df.to_csv("./Generate_file/after_process.csv")
"""


def multi_thread_df2sql(df, code_index_list, table_name):
    """Recreate *table_name* and fill it concurrently: one df2sql task per
    index in *code_index_list*, run on a thread pool sized by the system.
    Prints each task's result as it finishes.
    """
    # wipe the old table first so df2sql can append into a clean one
    del_old_and_create_table(table_name)

    import concurrent.futures
    with concurrent.futures.ThreadPoolExecutor() as pool:
        # submit one scrape-and-insert job per requested row
        futures = [pool.submit(df2sql, df, idx, table_name)
                   for idx in code_index_list]
        for done in concurrent.futures.as_completed(futures):
            print(done.result())



def multi_thread_f10_into_df(df, code_index_list):
    """Run f10_into_df for every index in *code_index_list* on a thread pool,
    printing each result as it completes.

    NOTE: executor.map was tried here but tended to trigger
    "Max retries exceeded" / ConnectionError; submit + as_completed is used
    instead.
    """
    import concurrent.futures
    with concurrent.futures.ThreadPoolExecutor() as pool:
        futures = [pool.submit(f10_into_df, df, idx) for idx in code_index_list]
        for done in concurrent.futures.as_completed(futures):
            print(done.result())
    
    

if __name__=="__main__":
    df=sql_to_df("main_sme")
    code_index_list=df["index"].values #.values or .to_list()
    df.to_csv("./Generate_file/before_process.csv")
    df['code'] = df['symbol'].map(lambda datai: str(datai.split('.')[1]).zfill(6)) 
    #f10 True or Flase列写入df
    multi_thread_f10_into_df(df,code_index_list)
    #把df写入csv
    df.to_csv("./Generate_file/after_process.csv")
    #把df写入数据表
    multi_thread_df2sql(df,code_index_list,"f10")
   
    #增加了f10一列为True/False/None的df
    df=pd.read_csv("./Generate_file/after_process.csv")
    df_f10_ACC=df.query("f10==True")
    #增加了f10一列为True的df_f10_ACC
    df_f10_ACC.to_csv("./Generate_file/df_f10_ACC.csv")
    #f10一列为True的index
    f10_acc_code_index_list=df_f10_ACC["index"].values
    #新的数据表，只含有f10为True
    table_name="f10_acc"
    #把增持且没有减持的股票列表写入数据库stock的数据表中
    multi_thread_df2sql(df_f10_ACC,f10_acc_code_index_list,table_name)
    #包含f10中增持且没有减持的完整股东表df_acc
    df_acc=sql_to_df(table_name)
    df_acc.to_csv("./Generate_file/df_f10_ACC_full_table.csv")
    # print(code_index_list)

