# -*- coding: utf-8 -*-
"""
Created on Sat Jun 25 09:03:21 2022
http://guba.eastmoney.com/list,000002,3,f.html
@author: 29165

<span class="zwtitlepdf">
<a href="https://pdf.dfcfw.com/pdf/H2_AN202301121581912665_1.pdf"><img src="/images/pdf.png"
 alt="" width="18" height="19">查看PDF原文</a></span>
"""
import os.path
#from tkinter.filedialog import SaveAs

import requests
from bs4 import BeautifulSoup
import time
import pandas as pd
import re
import pywin32_system32
from sqlalchemy import create_engine,MetaData
from sqlalchemy_utils import create_database,database_exists
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Table,Column,Integer,ForeignKey,String,Text,FLOAT,VARCHAR
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.ext.declarative import declarative_base
import numpy as np

from win32com.client.dynamic import Dispatch,ERRORS_BAD_CONTEXT
import os
import winerror

import sys
import importlib
importlib.reload(sys)
import os
from tqdm import tqdm
import pymysql
from pdfminer.pdfpage import PDFParser,PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal,LAParams
from pdfminer.pdfpage import PDFTextExtractionNotAllowed

def pdf2txt(pdffile,save_path):
    """Set up a pdfminer parsing pipeline for *pdffile*.

    NOTE(review): this function is incomplete — it builds the parser,
    aggregator and interpreter but never iterates pages, extracts any
    text, or uses *save_path*; main() never calls it.  It also targets
    the legacy pdfminer API (PDFDocument() with no arguments,
    set_document/set_parser/initialize), which does not exist in
    pdfminer.six — confirm the installed pdfminer version before use.
    """
    with open (pdffile,'rb') as f:
        parser=PDFParser(f)
        doc=PDFDocument()
        parser.set_document(doc)
        doc.set_parser(parser)

        doc.initialize()  # legacy API: initialize with empty password
        # NOTE(review): in legacy pdfminer `is_extractable` is an attribute,
        # not a method — calling it here would raise TypeError; verify.
        if not doc.is_extractable():
            raise PDFTextExtractionNotAllowed
        resource = PDFResourceManager()  # shared-resource manager for the PDF
        laparam = LAParams()  # layout-analysis parameters
        device = PDFPageAggregator(resource, laparams=laparam)  # page aggregator
        interpreter = PDFPageInterpreter(resource, device)  # page interpreter
def acrobat2txt(f_path, d_path):
    """Convert the PDF at *f_path* to plain text at *d_path* via the Adobe
    Acrobat COM automation interface (requires Acrobat installed).

    Errors are reported to stdout rather than raised (best-effort, matching
    the original).  Fix: the original's ``finally`` referenced ``pdDoc`` and
    ``AvDoc`` unconditionally, so any failure before those assignments raised
    a NameError that masked the real COM error; both handles are now
    pre-initialized and closed only if they were actually obtained.
    """
    AvDoc = None
    pdDoc = None
    try:
        AvDoc = Dispatch("AcroExch.AVDoc")
        AvDoc.Open(f_path, "")
        pdDoc = AvDoc.GetPDDoc()
        jsObject = pdDoc.GetJSObject()
        # "com.adobe.acrobat.plain-text" = Acrobat's text export converter
        jsObject.SaveAs(d_path, "com.adobe.acrobat.plain-text")
        #jsObject.SaveAs(d_path, "com.adobe.acrobat.docx")
        print("OK")
    except Exception as e:
        print('error')
        print(e)
    finally:
        # Release COM handles only if they were successfully created.
        if pdDoc is not None:
            pdDoc.close()
        if AvDoc is not None:
            AvDoc.close(True)
def get_stkcds(engine,tablename,database):
    """Return an iterator of (stkcd,) rows for stocks not yet fetched.

    Fix: the original returned the live ResultProxy after the connection
    context had already exited, so iteration happened on a released
    connection; rows are now materialized inside the ``with`` block and an
    equivalent iterator (supporting ``next()``) is returned.
    """
    command="select distinct stkcd from {}.{} where getted=False ".format(database,tablename)
    with engine.connect() as conn:
        rows = list(conn.execute(command))
    return iter(rows)
def get_items(engine,stkcd,database,tablename):
    """Return an iterator of (link, enddate, title) rows for one stock's
    unfetched announcements.

    Fixes: removed dead commented-out code; rows are materialized inside the
    connection context so the returned iterator remains usable after the
    connection is released (the original returned a live ResultProxy tied to
    an already-closed connection).
    NOTE(review): *stkcd* is interpolated into the SQL string — fine for the
    numeric codes this scraper uses, but not injection-safe for arbitrary
    input; consider a parameterized query.
    """
    command = "select link,enddate,title from {}.{} where stkcd={} and getted=False".format(database, tablename,stkcd)
    with engine.connect() as conn:
        rows = list(conn.execute(command))
    return iter(rows)
def get_web(url:str,proxy ='不使用'):
    """Fetch *url* and return ``(BeautifulSoup, status_code)``.

    When *proxy* is the sentinel '不使用' ("don't use"), the request goes out
    directly; otherwise *proxy* is passed to requests as a proxies mapping
    with a 5-second timeout.  The response is decoded as UTF-8 and parsed
    with the stdlib html.parser backend.
    """
    use_direct = (proxy == '不使用')
    if use_direct:
        response = requests.get(url)
    else:
        response = requests.get(url, proxies=proxy, timeout=5)
    response.encoding = 'utf-8'
    parsed = BeautifulSoup(response.text, 'html.parser')
    return parsed, response.status_code
def save_pdf(date:str,soup,save_path,stkcd,title):
    """Download the announcement PDF linked from *soup* and save it under
    ``<save_path>\\<6-digit stkcd>\\pdf``.

    Returns the file path *without* the '.pdf' extension (original
    behaviour; main() ignores the return value).  Raises AttributeError when
    the page has no ``span.zwtitlepdf`` element — main() relies on catching
    that to skip pages without a PDF.
    Fixes: removed the redundant ``f.close()`` inside the ``with`` block and
    renamed the local ``dir`` which shadowed the builtin.
    """
    span=soup.find('span',attrs={'class':"zwtitlepdf"})
    pdf=span.find('a')['href']
    r=requests.get(pdf)
    # Windows-style path layout, consistent with main()'s f_path.
    pdf_dir=save_path+'\\'+str(stkcd).zfill(6)+'\\'+'pdf'
    filename=gen_filename(pdf_dir,title,date)
    with open(filename+'.pdf', 'wb') as f:
        f.write(r.content)
    return filename
def gen_filename(dir,title,date):
    """Build the save path (without extension) for one announcement.

    Ensures *dir* exists, then joins it (Windows-style) with the sanitized
    ``title + date`` string produced by clean_title().
    """
    os.path.exists(dir) or os.makedirs(dir)
    sanitized = clean_title(title + date)
    return dir + '\\' + sanitized
def clean_title(title):
    """Strip characters that are illegal in Windows filenames from *title*.

    Removes: _ " * \\ < / ? 、 “ | > ” :

    Fix: the original pattern ``[_"*\\</?、“|>”:]`` spelled the backslash as
    ``\\<`` — which is merely an escaped '<' — so a literal backslash was
    never removed even though the documented banlist includes '\\\\'.  The
    character class now contains an escaped backslash as well.

    :param title: raw announcement title (possibly with date appended)
    :return: sanitized filename string
    """
    pattern=re.compile(r'[_"*\\</?、“|>”:]')
    filename=re.sub(pattern,'',title)
    return filename
def save_text_to_sql(text,link,conn,cursor):
    """Persist extracted announcement *text* for the row matching *link* in
    url.base_imformation, committing on *conn*.

    Fix: the original interpolated raw values via ``str.format``, producing
    unquoted (syntactically invalid) SQL for any string input and an SQL
    injection hole; the query is now parameterized with %s placeholders
    (the DB-API paramstyle pymysql uses) and values are passed to
    ``cursor.execute`` separately.
    """
    update_text_sql="update url.base_imformation set text=%s where link=%s"
    cursor.execute(update_text_sql, (text, link))
    conn.commit()
def get_random_proxy(proxypool_url='http://127.0.0.1:5555/random'):
    """Ask the local proxy-pool service for one random proxy address.

    :param proxypool_url: endpoint returning a bare ``host:port`` string
    :return: the proxy address with surrounding whitespace removed
    """
    response = requests.get(proxypool_url, timeout=5)
    proxy_address = response.text
    return proxy_address.strip()
def write_log(logpath,errortype,feature):
    """Stub logger: currently only guarantees that *logpath* exists.

    The *errortype* and *feature* parameters are accepted but unused — the
    body that would classify and record errors was never filled in.
    """
    if os.path.exists(logpath):
        return
    os.makedirs(logpath)
def update_get_state(engine,database,tablename,mode,stkcd=0):
    """Mark rows of ``<database>.<tablename>`` as fetched (getted=true).

    mode='single stock' flags every row of *stkcd*; mode='single board' is
    kept for compatibility but its SQL is broken (see note).

    Fixes: an unrecognized *mode* previously fell through with ``command``
    unbound, raising a confusing NameError at ``conn.execute`` — it now
    raises a clear ValueError; ``index`` is backtick-quoted (MySQL reserved
    word, consistent with update_getted_state).
    """
    if mode=='single board':
        # NOTE(review): `i` is not defined in SQL and was never a Python
        # variable here, so this statement cannot execute as written; the
        # branch is unused by main().  TODO: pass the row bound as a
        # parameter and interpolate it.
        command="update {}.{} set getted=true where `index`<i ".format(database,tablename)
    elif mode=='single stock':
        command = "update {}.{} set getted=true where stkcd={} ".format(database, tablename, stkcd)
    else:
        raise ValueError("unknown mode: {!r}".format(mode))

    with engine.connect() as conn:
        conn.execute(command)
def build_ip_pool(proxy,poolfile):
    """Append a verified proxy address to the on-disk pool file.

    Opens in append mode so earlier entries survive, one address per line.
    Fix: removed the redundant ``f.close()`` that the original called inside
    the ``with`` block (the context manager already closes the file).
    """
    with open(poolfile,'a') as f:
        f.write(proxy+'\n')
def update_getted_state(path,database,tablename,engine):
    """Resync the DB 'getted' flags with what is already downloaded on disk.

    Looks at the highest-numbered stock folder under *path*, counts how many
    announcement files sit in its ``pdf`` subfolder, and marks that many
    leading rows (``index`` below the count) of that stock as fetched, so a
    restarted crawl resumes where the previous run stopped.
    NOTE(review): assumes every entry under *path* is a numeric stock-code
    folder — a stray non-numeric name would make int() raise.
    """
    stock_codes = [int(name) for name in os.listdir(path)]
    last_stock = max(stock_codes)
    pdf_dir = path + '\\' + str(last_stock).zfill(6) + '\\' + 'pdf'
    num = len(os.listdir(pdf_dir))
    command = 'update {}.{} set getted=true where stkcd={} and `index`<{}'.format(
        database, tablename, int(last_stock), num)
    with engine.connect() as conn:
        result_proxy = conn.execute(command)
    print('最后一只股票为{}，目前爬取了{}条公告。 \n -------数据库初始状态更新完毕---------'.format(last_stock,num))
    return result_proxy
def main():
    """Crawl eastmoney guba announcement pages stock by stock and save each
    announcement's PDF under ``G:\\公告pdf文件\\<stkcd>\\pdf``.

    Flow: resync the DB 'getted' flags with what is already on disk, then
    for every unfetched stock pull its unfetched (link, date, title) rows,
    fetch each page, locate the PDF link and download it.  Failed page loads
    are retried through a local proxy pool at 127.0.0.1:5555.
    """
    # MySQL engine used by all the bookkeeping helpers below.
    engine = create_engine('mysql+pymysql://root:a123456a@localhost:3306/firm', echo=False)
    file2link=dict()  # NOTE(review): never written or read below — apparently dead
    stkcd2dir=dict()  # stock code -> its pdf directory (written, never read)
    tablename="base_imformation"
    database='firm'
    path='G:\公告pdf文件'
    # Mark announcements already present on disk as fetched before crawling.
    update_getted_state(path=path,database=database,tablename=tablename,engine=engine)
    stkcds=get_stkcds(engine,tablename,database)
    save_path='G:\公告pdf文件'
    stkcd_convert=[]  # stock codes processed during this run
    proxypool_url = 'http://127.0.0.1:5555/random'

    # Create the outermost storage folder for the PDF files.
    "创建最外层的存储文件夹，用于存储pdf文件"
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    # Start crawling and saving.
    "开始爬取和存储"
    while True:
        # One outer iteration crawls every announcement of a single stock.
        "一次循环爬取一只股票的所有公告"
        try:
            stkcd=next(stkcds)[0]
            stkcd_convert.append(stkcd)
        except StopIteration:
            # All stocks exhausted — crawl complete.
            print('全部爬取完成')
            break
        # Create this stock's pdf/ and txt/ folders.
        "生成存储该股票的文件夹"
        f_path = save_path + '\\' + str(stkcd).zfill(6) + '\\'+'pdf'
        d_path = save_path + '\\' + str(stkcd).zfill(6) + '\\'+ 'txt'
        stkcd2dir[stkcd]=f_path
        items=get_items(engine,stkcd,database,tablename)
        if not os.path.exists(d_path):
            os.makedirs(d_path)
        if not os.path.exists(f_path):
            os.makedirs(f_path)
        i=0  # count of PDFs saved for this stock

        while True:
            # One inner iteration fetches a single announcement row.
            "一次循环获取一条公告信息"
            start_get=time.time()
            try:
                (link,date,title)=next(items)
                print(link,date,title)
            except StopIteration:
                # NOTE(review): `stkcd.zfill(6)` assumes stkcd is a str —
                # elsewhere the code writes str(stkcd).zfill(6); confirm the
                # column type returned by the DB driver.
                print('股票{}已爬取完成'.format(stkcd.zfill(6)))
                update_get_state(engine, database, tablename, mode='single stock',stkcd=stkcd)
                break
            try:
                soup, status = get_web(link)
                print(status)
                time.sleep(0.1)  # polite delay between requests
                if status != 200:
                    times=0  # consecutive proxy-request failures
                    #print('ccdcdcdsdfcsddscscds')
                    #update_get_state(engine, database, tablename, mode='single board',url=link)
                    # Retry through random proxies until the page loads.
                    # NOTE(review): `times` only increments when get_web
                    # raises; a proxy chain that keeps returning non-200
                    # without raising loops forever — verify intent.
                    while status!=200:
                        proxy=get_random_proxy(proxypool_url)
                        proxies={'http':'http://'+proxy}
                        try:
                            soup, status = get_web(link,proxy=proxies)
                            if status==200:
                                # Remember proxies that actually worked.
                                build_ip_pool(proxy=proxy,poolfile='./ipPool.txt')
                            print('状态码',status,link)
                        except:
                            status=404
                            times+=1
                            if times>20:
                                break
            except:
                # NOTE(review): if get_web itself raised, `soup` may be
                # unbound and save_pdf below would fail with NameError
                # (not the AttributeError being caught) — verify.
                print('未知错误')



            get_time=time.time()-start_get
            print('爬取该条公告所用时间为: {}'.format(time.time()-start_get))
            try:
                save_pdf(date,soup, save_path, stkcd, title)
                #update_get_state(engine,database,tablename,link)
                i+=1
                print(title,'保存时间为：{}'.format(time.time()-start_get-get_time))
            except AttributeError:
                # Page had no 'zwtitlepdf' span (no PDF attached) — skip.
                print('error')

# Script entry point: run the crawler only when executed directly.
if __name__ == "__main__":
   main()











    #print(next(stkcds)[0])
    # stkcds.drop_duplicates()
    # os.mkdir("output")
    # files = list(filter(lambda f: f.endswith('.pdf'), os.listdir()))
    # for file in files:
    #     print('convert:', file)
    #     outfile = file.replace('.pdf', '.txt')
    # ERRORS_BAD_CONTEXT.append(winerror.E_NOTIMPL)


