import re

from convertPDF2txt import acrobat2txt
import threading
import pandas as pd
import argparse
import os
import datetime
from concurrent.futures import ProcessPoolExecutor,ThreadPoolExecutor
import time

from  tqdm import tqdm
import numpy as np
from convertPDF2txt import pdf2txt
from bypy import ByPy
from sqlalchemy import create_engine,MetaData
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy import Table,Column,Integer,ForeignKey,String,Text,FLOAT,VARCHAR,BOOLEAN
import pymysql
def parse_args(argv=None):
    """Parse command-line options for the sharded crawler.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Accepting a list keeps the function testable without mutating
            ``sys.argv`` (backward-compatible: calling with no argument
            behaves as before).

    Returns:
        argparse.Namespace with ``count`` — the shard/table index.
    """
    parser = argparse.ArgumentParser(description="加入分表参数")
    # type=int so a CLI-supplied value matches the integer default
    # (previously "--count 3" produced the *string* '3').
    parser.add_argument('--count', type=int, default=0, help="用于区分不同表格的爬虫")
    return parser.parse_args(argv)
def lam_txtfilename(pdffilename):
    """Map a PDF path to its corresponding TXT path.

    Swaps the ``.pdf`` extension for ``.txt`` and redirects the path from
    the 'pdf全文' directory to the 'txt全文' directory.  Non-PDF paths are
    returned unchanged.
    """
    if not pdffilename.endswith('.pdf'):
        return pdffilename
    txt_path = pdffilename[:-4] + '.txt'
    return txt_path.replace('pdf全文', 'txt全文')
def do_pdf2txt(pdf, txt, data, i):
    """Extract cleaned plain text for one announcement into ``data``.

    If the source PDF exists, the text is taken from the cached TXT file
    when present, otherwise produced by ``pdf2txt``.  Everything except
    CJK ideographs, digits and ASCII letters is stripped before the
    result is written to ``data.loc[i, 'text']``.  Missing PDFs are
    silently skipped (the row keeps its previous value).
    """
    if not os.path.isfile(pdf):
        return
    if os.path.isfile(txt):
        # A cached conversion already exists — reuse it.
        with open(txt, 'r', encoding='utf-8') as fh:
            text = fh.read()
    else:
        text = pdf2txt(pdf, txt)
    # Keep only Chinese characters (U+4E00–U+9FA5), digits and ASCII letters.
    text = re.sub('([^\u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a])', '', text)
    data.loc[i, 'text'] = text




def upload2BDWP(filename, cloudname):
    """Upload the local file *filename* to Baidu net-disk as *cloudname*."""
    client = ByPy()
    client.upload(filename, cloudname)
def up2sql(engine, ID, stkcd, table, text, year):
    """Store *text* for announcement (ids=ID, stocks=stkcd, years=year) in
    MySQL table *table*, but only while the row still holds the
    placeholder "无" (i.e. no text has been uploaded yet).

    Fix over the previous version: the announcement text was interpolated
    straight into the SQL string, which broke on quotes and required the
    manual ``'%' -> '%%'`` escaping.  Values are now bound parameters
    (SQL-injection safe, no escaping needed).  The table name cannot be a
    bound parameter and is still formatted in — callers must pass trusted,
    internally generated table names only.
    """
    # Local import: sqlalchemy is already a file-level dependency; importing
    # here avoids shadowing the function's *text* parameter.
    from sqlalchemy import text as sql_text

    where = "{0}.ids = :ids AND {0}.stocks = :stocks AND {0}.years = :years".format(table)
    select_stmt = sql_text("select {0}.text from announce.{0} WHERE {1}".format(table, where))
    update_stmt = sql_text("UPDATE {0} SET text = :text WHERE {1}".format(table, where))
    params = {'ids': ID, 'stocks': stkcd, 'years': year}

    with engine.connect() as con:
        for row in con.execute(select_stmt, params):
            if row[0] == "无":
                con.execute(update_stmt, dict(params, text=text))
if __name__ == "__main__":
    # Batch job: for every merged-announcement CSV, convert each referenced
    # PDF to cleaned plain text and persist the resulting table to MySQL.
    engine = create_engine('mysql+pymysql://root:a123456a@localhost:3306/announce', echo=False)
    stocklist = pd.read_csv(r'.\公告文件\清理后股票代码.csv')
    Lock = threading.RLock()  # shared lock for the (currently disabled) threaded mode

    files = os.listdir('.//公告文件//合并公告信息')
    files = ['.//公告文件//合并公告信息//' + file for file in files]
    args = parse_args()
    bp = ByPy()
    count = 4  # resume point: files before this index were already processed
    print(files[int(count)])

    # Column types for to_sql, hoisted out of the loop (loop-invariant).
    # NOTE(review): sizes mirror the crawler's CSV schema — confirm there.
    dtypedict = {
        'stocks': VARCHAR(30),
        'years': VARCHAR(5),
        'ids': VARCHAR(20),
        'titles': VARCHAR(400),
        'finalurl': VARCHAR(1000),
        'shortTitle': VARCHAR(300),
        'fileType': VARCHAR(10),
        'annType': VARCHAR(200),
        'fullurl': VARCHAR(1000),
        'filename': VARCHAR(2000),
        'code': VARCHAR(30),
        'txtname': VARCHAR(2000),
        'text': LONGTEXT(),
        'state': BOOLEAN(),
    }

    for file_idx in tqdm(range(4, len(files))):
        data = pd.read_csv(files[file_idx]).dropna()
        data['txtname'] = data['filename'].apply(lam_txtfilename)
        data['text'] = "无"   # placeholder meaning "no text yet"
        data['state'] = True
        table = '全文{}.csv'.format(file_idx)
        print(data['text'])

        # Single-threaded conversion over every row.  (The previous version
        # also built an unused ThreadPoolExecutor(100) and, per row, unused
        # stocks/years/ids/filename/cloudname locals; for the threaded
        # variant submit do_pdf2txt(pdf, txt, data, row) to an executor.)
        # NOTE: distinct loop variable — the old code shadowed the outer `i`.
        for row in range(len(data)):
            # assumes column 9 is the PDF path and column 11 the TXT path
            # of the merged CSV — TODO confirm against the crawler output.
            pdf, txt = data.iloc[row, 9], data.iloc[row, 11]
            do_pdf2txt(pdf, txt, data, row)

        # Persist the full table; pandas manages the connection via the
        # engine (the old, never-used and never-closed pymysql.connect()
        # leak is removed).
        data.to_sql(name=table[:-4], con=engine, index=False,
                    if_exists='replace', dtype=dtypedict)
