import time
import argparse
import pandas as pd
import os
from  tqdm import tqdm
from crawl_function import crawl_pdf,m_download_pdf,m_outer
import multiprocessing
import threading
from concurrent.futures import ProcessPoolExecutor,ThreadPoolExecutor
import datetime
def task(num):
    """Debug placeholder: announce which task number is executing."""
    print("执行", num)

def parse_args(argv=None):
    """Parse command-line options for sharding the crawl across instances.

    :param argv: optional list of argument strings; ``None`` means
        ``sys.argv[1:]`` (the original behavior).
    :return: namespace with ``count`` (int) — index of the first merged CSV
        chunk this crawler instance should process.
    """
    parser = argparse.ArgumentParser(description="加入分表参数")
    # type=int makes CLI values match the int default; existing callers that
    # still do int(args.count) keep working.
    parser.add_argument('--count', type=int, default=0, help="用于区分不同表格的爬虫")
    args = parser.parse_args(argv)
    return args
def seperate_code(stkcd):
    """Return the first six characters of *stkcd* as a string (the bare code)."""
    text = str(stkcd)
    return text[:6]
def standard_code(num):
    """Normalize *num* (e.g. 600519.0 or '007') to a plain integer string."""
    return "{:d}".format(int(num))
def merge_annoncement(path='.\\公告文件\\基本信息',
                      out_dir='.\\公告文件\\合并公告信息',
                      chunk_size=1000):
    """
    将按代码和年份爬取的公告合并 — merge per-code/per-year announcement CSVs
    into numbered chunk files.

    Walks every sub-directory of *path*, concatenates the CSVs found there,
    and every *chunk_size* source files writes the accumulated rows to
    *out_dir* as ``全部公告汇总<k>.csv`` with an added 6-character 'code'
    column derived from the 'stocks' column.

    :param path: root directory holding one sub-directory per stock code.
    :param out_dir: directory the merged chunk files are written to
        (previously hard-coded).
    :param chunk_size: number of source files accumulated per output chunk
        (previously hard-coded 1000).
    :return: None
    """
    def _flush(frame, part):
        # Derive the 6-char code per row (str(x)[:6], vectorized) and persist.
        frame['code'] = frame['stocks'].astype(str).str[:6]
        frame.to_csv(os.path.join(out_dir, '全部公告汇总{}.csv'.format(part)),
                     index=False)

    dirs = os.listdir(path)
    print(dirs)
    data = pd.DataFrame()
    num = 0
    part = 0
    for dir in dirs:
        files = os.listdir(os.path.join(path, dir))
        for file in files:
            data = pd.concat([data, pd.read_csv(os.path.join(path, dir, file))],
                             axis=0)
            num += 1
            print(num)
            if num % chunk_size == 0:
                part += 1
                _flush(data, part)
                data = pd.DataFrame()
    # Bug fix: the original silently dropped any rows that did not fill a
    # complete chunk; write the trailing partial chunk as well.
    if len(data):
        _flush(data, part + 1)
if __name__ == "__main__":
    # merge_annoncement()
    detail_url = 'http://www.cninfo.com.cn/new/announcement/bulletin_detail'
    stocklist = pd.read_csv(r'.\公告文件\清理后股票代码.csv')
    args = parse_args()

    # Pre-create one download directory per 6-digit, zero-padded stock code.
    for i in range(len(stocklist)):
        code = str(stocklist.loc[i, 'code']).zfill(6)
        target = './公告文件/pdf全文' + '/' + str(code)
        if not os.path.exists(target):
            os.makedirs(target)

    lock = threading.RLock()  # NOTE(review): never used below — confirm before removing
    files = os.listdir('.//公告文件//合并公告信息')
    files = ['.//公告文件//合并公告信息//' + file for file in files]

    def _dispatch_downloads(frame):
        """Download every PDF in *frame* with a 20-thread pool; blocks until done.

        Positional columns 8/9 are taken as (url, filename) — assumes the
        merged-CSV column layout, TODO confirm against merge output.
        """
        # Context manager == pool.shutdown() with wait=True, as before.
        with ThreadPoolExecutor(20) as pool:
            for row in range(len(frame)):
                url, filename = frame.iloc[row, 8], frame.iloc[row, 9]
                future = pool.submit(m_download_pdf, url)
                future.add_done_callback(m_outer(filename))

    # Process merged chunk files starting at --count so several crawler
    # instances can split the chunk list between them.
    count = int(args.count)
    while count < len(files):
        data = pd.read_csv(files[count])
        data = data.dropna()
        now = datetime.datetime.now()
        # NOTE(review): `now.hour >= 0` is always true, so the else branch is
        # dead code; the duration formula suggests a 22:00 start was the
        # intent — confirm before changing the condition.
        if now.hour >= 0:
            _dispatch_downloads(data)
        else:
            print("未到爬取时间")
            tm_hour, tm_min, tm_sec = time.localtime().tm_hour, \
                time.localtime().tm_min, \
                time.localtime().tm_sec
            # Seconds remaining until 22:00 today.
            duration = (22 - tm_hour - 1) * 3600 + (60 - tm_sec) + (59 - tm_min) * 60
            print("现在是{}点,距离开始爬取还有{}小时".format(tm_hour, duration // 3600))
            time.sleep(duration)
            _dispatch_downloads(data)
        count += 1


