# -*- coding: utf-8 -*-
"""
Created on Sat Jun 25 09:03:21 2022
http://guba.eastmoney.com/list,000002,3,f.html
@author: 29165

<span class="zwtitlepdf">
<a href="https://pdf.dfcfw.com/pdf/H2_AN202301121581912665_1.pdf"><img src="/images/pdf.png"
 alt="" width="18" height="19">查看PDF原文</a></span>
"""
import os.path
#from tkinter.filedialog import SaveAs

import requests
from bs4 import BeautifulSoup
import time
import pandas as pd
import re
import pywin32_system32
from sqlalchemy import create_engine,MetaData
from sqlalchemy_utils import create_database,database_exists
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Table,Column,Integer,ForeignKey,String,Text,FLOAT,VARCHAR
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.ext.declarative import declarative_base
import numpy as np

from win32com.client.dynamic import Dispatch,ERRORS_BAD_CONTEXT
import os
import winerror

import sys
import importlib
importlib.reload(sys)
import os
from tqdm import tqdm

from pdfminer.pdfpage import PDFParser,PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal,LAParams
from pdfminer.pdfpage import PDFTextExtractionNotAllowed
def pdf2txt(pdffile, save_path):
    """Extract the text of a PDF with pdfminer (legacy API) and write it to a file.

    :param pdffile: path of the input PDF.
    :param save_path: path of the UTF-8 text file to write.
    :raises PDFTextExtractionNotAllowed: if the PDF forbids text extraction.
    """
    with open(pdffile, 'rb') as f:
        # Legacy pdfminer wiring: parser and document reference each other.
        parser = PDFParser(f)
        doc = PDFDocument()
        parser.set_document(doc)
        doc.set_parser(parser)

        doc.initialize()
        if not doc.is_extractable():
            raise PDFTextExtractionNotAllowed
        resource = PDFResourceManager()  # shared resources (fonts, images)
        laparam = LAParams()  # layout-analysis parameters
        device = PDFPageAggregator(resource, laparams=laparam)  # aggregates layout objects
        interpreter = PDFPageInterpreter(resource, device)  # page interpreter

        # Walk every page and dump each horizontal text box to the output file.
        with open(save_path, 'w', encoding='utf-8') as out:
            for page in doc.get_pages():
                interpreter.process_page(page)
                layout = device.get_result()
                for element in layout:
                    if isinstance(element, LTTextBoxHorizontal):
                        out.write(element.get_text())
def acrobat2txt(f_path, d_path):
    """Convert a PDF to plain text through Adobe Acrobat's COM automation.

    Requires Adobe Acrobat (not Reader) installed on Windows.

    :param f_path: absolute path of the source PDF.
    :param d_path: absolute path of the plain-text file to produce.
    """
    # Initialize to None so the finally block cannot raise NameError
    # when Dispatch()/Open() fails before these are assigned.
    AvDoc = None
    pdDoc = None
    try:
        AvDoc = Dispatch("AcroExch.AVDoc")
        AvDoc.Open(f_path, "")
        pdDoc = AvDoc.GetPDDoc()
        # The JS bridge exposes SaveAs with a text-conversion target.
        jsObject = pdDoc.GetJSObject()
        jsObject.SaveAs(d_path, "com.adobe.acrobat.plain-text")
        print("OK")
    except Exception as e:
        print('error')
        print(e)
    finally:
        # Always release the COM documents that were actually opened.
        if pdDoc is not None:
            pdDoc.close()
        if AvDoc is not None:
            AvDoc.close(True)





def get_link(engine, stkcd=None):
    """Read announcement rows from the base_imformation table.

    :param engine: SQLAlchemy engine connected to the `url` database.
    :param stkcd: optional stock code; when given, return (link, text) rows
        for that code, otherwise return every stkcd in the table.
    :return: pandas DataFrame with the selected columns.
    """
    if stkcd is not None:
        # Parameterized query instead of str.format to avoid SQL injection.
        data = pd.read_sql(sql='SELECT link,text FROM base_imformation where stkcd=%s;',
                           con=engine, params=(stkcd,))
    else:
        data = pd.read_sql(sql='SELECT stkcd FROM base_imformation ;',
                           con=engine)
    return data
def get_web(url: str, timeout: float = 30):
    """Fetch a page and parse it with BeautifulSoup.

    :param url: URL to GET.
    :param timeout: request timeout in seconds (new, backward-compatible;
        without one `requests.get` can block forever).
    :return: tuple (BeautifulSoup of the page, HTTP status code).
    """
    r = requests.get(url, timeout=timeout)
    r.encoding = 'utf-8'  # pages are UTF-8; override requests' guess
    soup = BeautifulSoup(r.text, 'html.parser')
    return soup, r.status_code
def save_pdf(soup, save_path, stkcd, title, date: str):
    """Download the PDF linked in the page's `zwtitlepdf` span.

    Saves it as <save_path>/<stkcd zero-padded to 6>/<cleaned title><date>.pdf.

    :param soup: BeautifulSoup of an announcement page.
    :param save_path: root directory for downloads.
    :param stkcd: stock code (int or str); zero-padded to 6 digits.
    :param title: announcement title (cleaned of illegal filename chars).
    :param date: date string appended to the file name.
    """
    span = soup.find('span', attrs={'class': "zwtitlepdf"})
    pdf_url = span.find('a')['href']
    r = requests.get(pdf_url)
    # os.path.join instead of manual '\\' concatenation; exist_ok avoids
    # the exists()/makedirs() race of the original.
    target_dir = os.path.join(save_path, str(stkcd).zfill(6))
    os.makedirs(target_dir, exist_ok=True)
    filename = os.path.join(target_dir, clean_title(title) + date + '.pdf')
    with open(filename, 'wb') as f:
        # `with` already closes the file; the original's explicit close() was redundant.
        f.write(r.content)
def clean_title(title):
    """Strip characters that are illegal (or unwanted) in Windows filenames.

    Removes: ? 、 _ / * “ ” < > | \\ : "

    The original implementation was broken: it called ``re.compile()`` with
    no pattern (TypeError) and returned an undefined name ``filename``.

    :param title: raw announcement title.
    :return: title with every banned character removed.
    """
    banned = set('?、_/*“”<>|\\:"')
    return ''.join(ch for ch in title if ch not in banned)
def get_stkcd(file, headers):
    """Read '<file>公告页数.xlsx' and return stock codes with their page counts.

    :param file: path prefix of the Excel workbook.
    :param headers: unused; kept for interface compatibility with callers.
    :return: tuple (list of 6-digit zero-padded code strings, list of page counts).
    """
    data = pd.read_excel(file + '公告页数.xlsx')
    print(data.head(5))
    # Comprehension replaces the original manual index-tracking loop.
    stkcds = [str(code).zfill(6) for code in data['code']]
    pages = list(data['pages'])
    return stkcds, pages

def main():
    """Entry point: convert one sample PDF to plain text via Acrobat."""
    # Acrobat's JS bridge returns E_NOTIMPL for some calls; tell pywin32
    # not to treat that HRESULT as a fatal COM error.
    ERRORS_BAD_CONTEXT.append(winerror.E_NOTIMPL)
    # Raw strings: '\公' etc. are invalid escape sequences in a normal
    # string literal (DeprecationWarning); the runtime value is identical.
    f_path = r'D:\公司年报爬取\金融学文本大数据挖掘方法与研究进展_姚加权.pdf'
    d_path = r'D:\公司年报爬取\测试年报.txt'
    acrobat2txt(f_path, d_path)


if __name__ == '__main__':
    # Guarded so importing this module no longer triggers the conversion.
    main()
