import shutil

import pandas as pd
import os
import time
from crawl_function import cal_pageNum,get_idlist,m_get_idlist
from tqdm import tqdm
import multiprocessing
import argparse
from sqlalchemy import Table,Column,Integer,ForeignKey,String,Text,FLOAT,VARCHAR
from sqlalchemy import create_engine,MetaData
from sqlalchemy.dialects.mysql import LONGTEXT
def lam_fillURL(url):
    """Prefix a relative cninfo announcement path with the static-file host."""
    host = 'http://static.cninfo.com.cn'
    return f'{host}/{url}'
def single_crawl(stock, seDate, year, profile_url):
    """Crawl the announcement id list of one stock for one date range.

    Args:
        stock: "code,orgId" pair, e.g. "000001,gssz0000001".
        seDate: date range string "YYYY-01-01~YYYY-MM-DD" sent to the query API.
        year: year label, used only in the error-log line.
        profile_url: the cninfo hisAnnouncement query endpoint.

    Failures are appended to .\日志\错误日志.txt rather than raised.
    """
    try:
        pageNum = cal_pageNum(stock, seDate, url=profile_url)
        print(f'{stock[0:6]}在{seDate[0:4]}年有{pageNum}页公告')
    except Exception as e:
        _log_crawl_error(stock, year, e)
        # Bug fix: without a page count there is nothing to fetch. The
        # original fell through and hit a NameError on the unbound pageNum
        # inside the next try block (logged as a second, misleading error).
        return
    try:
        print('开始获取id')
        get_idlist(total_pages=pageNum, stock=stock, seDate=seDate, url=profile_url)
        time.sleep(0.1)  # small pause to be polite to the server
        print(f'{stock}爬取完成')
    except Exception as e:
        print(e)
        print('出错了！')
        _log_crawl_error(stock, year, e)

def _log_crawl_error(stock, year, e):
    """Append one '<stock><year><error>' line to the shared error-log file."""
    if not os.path.exists(r'.\日志'):
        os.makedirs(r'.\日志')
    with open(r'.\日志\错误日志.txt', 'a', encoding='utf-8') as log:
        log.write(str(stock) + str(year) + str(e) + '\n')
def initial_crawl(profile_url, stocklist):
    """Crawl every stock in *stocklist*, one date range per year 2007-2022."""
    for row in tqdm(range(len(stocklist))):
        code = str(stocklist.loc[row, 'code']).zfill(6)
        if int(code) < 1:
            continue  # skip non-positive placeholder codes
        org_id = str(stocklist.loc[row, 'orgId'])
        stock = f'{code},{org_id}'
        for year in range(2007, 2023):
            date_range = f'{year}-01-01~{year}-12-31'
            single_crawl(stock, date_range, year, profile_url)
def complement1(file='./公告文件/清理后股票代码.csv',
                dirpath='./公告文件/基本信息',
                profile_url='http://www.cninfo.com.cn/new/hisAnnouncement/query'):
    """Re-crawl every stock that has no output directory at all under *dirpath*.

    Args:
        file: CSV with 'code' and 'orgId' columns for all stocks.
        dirpath: directory whose sub-directory names start with the 6-digit
            code of every already-crawled stock.
        profile_url: cninfo query endpoint. Bug fix: the original read this
            from a global that only exists when the file runs as a script
            (NameError on import); it is now a keyword parameter with the
            endpoint as default, so existing ``complement1()`` calls still work.
    """
    # Existing sub-directory names begin with the 6-digit stock code.
    crawled_stocks = [int(stock[0:6]) for stock in os.listdir(dirpath)]
    stocks = pd.read_csv(file)
    codes = list(stocks['code'])
    orgIds = list(stocks['orgId'])
    items = dict(zip(codes, orgIds))
    for single_stock in codes:
        if single_stock in crawled_stocks:
            continue
        orgId = items[single_stock]
        for year in range(2007, 2023):
            stock = str(single_stock).zfill(6) + ',' + str(orgId)
            seDate = str(year) + '-01-01~' + str(year) + '-12-31'
            single_crawl(stock, seDate, year, profile_url)
def clean_pre(path):
    """Delete per-year CSVs that were only partially crawled.

    A complete crawl result has more than 5 columns; any CSV under
    ``path/<stock>/`` with 5 or fewer columns is treated as broken and removed.

    Bug fix: paths are now built with ``os.path.join`` instead of a
    hard-coded ``'\\'`` separator, so the function also works on POSIX
    systems (behavior on Windows is unchanged).
    """
    dirs = os.listdir(path)
    for i in tqdm(range(len(dirs))):
        stock_dir = os.path.join(path, dirs[i])
        for file in os.listdir(stock_dir):
            file_path = os.path.join(stock_dir, str(file))
            data = pd.read_csv(file_path)
            if len(data.columns) <= 5:
                os.remove(file_path)

def complement2(profile_url, path='./公告文件/基本信息'):
    """Re-crawl the individual missing years of partially crawled stocks.

    A stock directory with 16 files (one per year 2007-2022) is complete;
    otherwise each absent year is crawled again.

    Bug fix: the original tested ``year not in files`` with ``year`` an int
    against a list of filename strings — always True — so every incomplete
    stock was re-crawled for *all* years. The year is now matched as a
    string against the filenames.
    """
    dirs = os.listdir(path)
    years = list(range(2007, 2023))
    for i in tqdm(range(len(dirs))):
        files = os.listdir(os.path.join(path, dirs[i]))
        if len(files) == 16:
            continue  # one file per year already present
        for year in years:
            # NOTE(review): assumes each per-year CSV name contains the
            # 4-digit year — confirm against get_idlist's naming scheme.
            if not any(str(year) in f for f in files):
                seDate = str(year) + '-01-01~' + str(year) + '-12-31'
                single_crawl(stock=dirs[i], seDate=seDate, year=year, profile_url=profile_url)
    print(years)
def fill_url(file, savePath='./公告文件/pdf全文'):
    """Rewrite *file* in place with 'fullurl' and 'filename' columns added.

    Empty CSVs (header only) are deleted instead of rewritten.
    """
    table = pd.read_csv(file)
    if len(table) == 0:
        os.remove(file)
        return
    table['fullurl'] = table.apply(lambda row: lam_fillURL(row.finalurl), axis=1)
    table['filename'] = table.apply(
        lambda row: lam_absoluteFilename(row['stocks'], row['ids'], row['shortTitle']),
        axis=1)
    ordered = ['stocks', 'years', 'ids', 'titles', 'finalurl',
               'shortTitle', 'fileType', 'annType', 'fullurl', 'filename']
    table = table[ordered]
    table.to_csv(file, index=None)

def lam_absoluteFilename(stock, id, shortTitle, savePath='./公告文件/pdf全文'):
    """Build the target PDF path: <savePath>/<6-digit code>/<id><shortTitle>.pdf.

    The code is the first 6 characters of *stock*, normalised through
    int() and re-padded to 6 digits.
    """
    code = str(int(stock[0:6])).zfill(6)
    pdf_name = str(id) + str(shortTitle) + '.pdf'
    return '/'.join([savePath, code, pdf_name])

def upload(engine, file):
    """Append the rows of CSV *file* to the `base_imformation` MySQL table."""
    frame = pd.read_csv(file)
    # Column -> VARCHAR width; widths mirror the sizes of the crawled fields.
    widths = {
        'stocks': 30,
        'years': 5,
        'ids': 20,
        '公告类型': 150,
        'titles': 400,
        'finalurl': 1000,
        'shortTitle': 300,
        'fileType': 10,
        'annType': 200,
        'fullurl': 1000,
        'filename': 2000,
    }
    dtypedict = {column: VARCHAR(width) for column, width in widths.items()}
    frame.to_sql(name='base_imformation', con=engine, index=False,
                 if_exists='append', dtype=dtypedict)
if __name__ == '__main__':
    # Pipeline driver. Stages already completed are left commented out;
    # uncomment the stage you need and re-run the script.
    # Announcement-list query endpoint and static host for the PDF files.
    profile_url='http://www.cninfo.com.cn/new/hisAnnouncement/query'
    detail_url='http://static.cninfo.com.cn'
    stocklist = pd.read_csv(r'.\公告文件\清理后股票代码.csv')
    path = '.\\公告文件\\基本信息'
    "首次获取时运行"
    # Stage 1 ("run on first crawl"): fetch every stock/year from scratch.
    # initial_crawl(profile_url=profile_url,stocklist=stocklist)
    # NOTE(review): DB credentials are hard-coded in the URL — consider
    # reading them from an environment variable instead.
    engine=create_engine('mysql+pymysql://root:a123456a@localhost:3306/announce', echo=False)
    "删除之前爬取的"
    # Stage 2 ("delete previously crawled"): drop broken/partial CSVs.
    print('删除开始')
    # clean_pre(path)
    print('删除完成')
    "补充全部年度未爬取成功的"
    # Stage 3: re-crawl stocks with no output directory at all.
    print('开始补充所有年未爬取的')
    # complement1()
    print('所有年份均缺失的补充完毕')

    "补充部分年度未爬取成功的"
    # Stage 4: re-crawl individual missing years of partially crawled stocks.
    print('开始个别补充')
    # try:
    #     complement2(profile_url=profile_url)
    # except Exception as e:
    #     pass
    print('补充完成')
    "将url补充完整"
    # Stage 5: complete the URLs/filenames in every CSV and upload to MySQL.
    try:
        dirs=os.listdir(path)
        for i in tqdm(range(len(dirs))):
            files = os.listdir(path + '\\' + dirs[i])
            for file in files:
                #print(path+'\\'+str(dirs[i])+'\\'+str(file))
                fill_url(path+'\\'+str(dirs[i])+'\\'+str(file))
                upload(engine=engine,file=path+'\\'+str(dirs[i])+'\\'+str(file))
    except Exception as E:
        # Report which file broke the loop, then stop the stage.
        print(path+'\\'+str(dirs[i])+'\\'+str(file))
        print(str(E))
        print('error')