import json, os, threading, xlrd
from time import sleep
from urllib import parse
from xlutils.copy import copy

from pdfminer.converter import PDFPageAggregator
from pdfminer.pdfparser import PDFParser
from pdfminer.layout import *
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
import requests

# Digital-transformation keyword lexicon. The frequency of these terms in a
# company's annual report is used as a proxy for its degree of
# digitalization (common methodology in Chinese A-share research).
keywords = ['移动互联网',   '工业互联网', '移动互联', '互联网医疗', '电子商务', '移动支付', '第三方支付',
            'NFC支付',     '智能能源',   'B2B', 'B2C', 'C2B', 'C2C',
            'O2O',
            '网联',        '智能穿戴',  '智慧农业', '智能交通', '智能医疗', '智能客服', '智能家居', '智能投顾', '智能文旅', '智能环保',
            '智能电网',     '智能营销',  '数字营销',
            '无人零售',     '互联网金融', '数字金融', 'Fintech', '金融科技', '量化金融', '开放银行', '人工智能', '商业智能', '图像理解',
            '投资决策辅助系统',
            '智能数据分析',
            '智能机器人',   '机器学习', '深度学习', '语义探索', '生物识别技术', '人脸识别', '语音识别', '身份验证', '自动驾驶', '自然语言处理',
            '大数据',      '数据',
            '数据挖掘',
            '文本挖掘',    '数据可视化',  '异构数据', '征信', '增强现实', '混合现实', '虚拟现实', '云计算', '流计算', '图计算',
            '内存计算',    '多方安全计算', '类脑计算',
            '绿色计算',    '认知计算', '融合架构', '亿级并发', 'EB级存储', '物联网', '信息物理系统', '区块链', '数字货币', '分布式计算',
            '差分隐私技术', '智能金融合约']
# Target company short names; populated from ./2022.xls by get_company_names().
company_names = []

def get_adress():
    """Fetch the SZSE stock list from cninfo.com.cn and process every
    company listed in the module-level ``company_names``: download its
    annual-report PDFs, convert them to .txt and run the keyword analysis.

    Side effects: creates one directory per company (``<code><name>``),
    downloads PDFs into it, writes .txt files and appends rows to the
    results workbook via matchKeyWords(). Sleeps periodically to avoid
    hammering the server. Returns None.
    """
    global company_names
    global keywords
    download_counts = 0
    url = 'http://www.cninfo.com.cn/new/data/szse_stock.json'
    data = {'keyWord': 1,
            'maxSecNum': 10,
            'maxListNum': 5}
    hd = {'Host': "www.cninfo.com.cn",
          'Origin': 'http://www.cninfo.com.cn',
          'Pragma': 'no-cache',
          'Accept-Encoding': 'gzip,deflate',
          'Connection': 'keep-alive',
          'User-Agent': 'Mozilla/5.0(Windows NT 10.0;Win64;x64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome '
                        '/75.0.3770.100Safari / 537.36',
          'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
          'Accept': 'application/json,text/plain,*/*',
          'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'}
    r = requests.post(url, headers=hd, data=data)
    r_content = r.content
    r.close()
    pk = json.loads(str(r_content, encoding='utf-8'))
    # BUGFIX: the original indexed pk['stockList'][i + 4] while iterating
    # range(len(stockList)), which raised IndexError on the last 4 entries
    # and silently skipped the first 4. Iterate the list directly instead.
    for i, stock in enumerate(pk['stockList']):
        download_counts += 1
        if download_counts % 10 == 0:
            sleep(30)  # throttle: long pause every 10 companies
        orgId = stock['orgId']
        zwjc = stock['zwjc']
        code = stock['code']
        if zwjc not in company_names:
            continue  # not a target company
        print(orgId, code, zwjc, i)
        # '*' (ST markers) is illegal in directory names on Windows.
        zwjc = zwjc.replace('*', '')
        folder = code + zwjc
        if os.path.exists(folder):
            continue  # already downloaded on a previous run
        os.mkdir(folder)
        file_list = get_PDF(orgId, code, zwjc)
        print(file_list)
        txt_folder = ''
        for file_name in file_list:
            path_name = load_pdf(file_name, code, zwjc)
            pdf_path = path_name + '.pdf'
            txt_path = path_name + '.txt'
            parsePDF(pdf_path, txt_path)
            print('pdf_path---------------------------------------->' + pdf_path)
            txt_folder = os.path.join(os.getcwd(), folder)
        # BUGFIX: the original tested `file_list is not None`, which is
        # always true for a list; an empty download set then called
        # matchKeyWords('') and crashed. Test for a non-empty list instead.
        if file_list:
            matchKeyWords(txt_folder, keywords)
        print('下一家~')


def download_PDF(url, file_name, zwjc, code):
    """Download one PDF from *url* into ``<code><zwjc>/<file_name>.pdf``.

    '*' is stripped from *zwjc* so the path matches the directory created
    by the caller (Windows forbids '*' in file names). Returns None.
    """
    r = requests.get(url)
    try:
        zwjc = zwjc.replace('*', '')
        # BUGFIX: the original opened the file without ever closing it,
        # leaking a handle per download; `with` guarantees the close.
        with open(code + zwjc + '/' + file_name + '.pdf', 'wb') as f:
            f.write(r.content)
    finally:
        r.close()


def get_PDF(orgId, code, zwjc):
    """Query cninfo's announcement search for the company's annual reports
    (category_ndbg_szsh, 2016-12-01 .. 2023-12-18), download every
    qualifying PDF via download_PDF() and return the list of file names
    (without the .pdf extension) that were downloaded.
    """
    url = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'
    data = {'pageNum': '1',
            'pageSize': 30,
            'column': 'szse',
            'tabName': 'fulltext',
            'plate': '',
            'stock': '{},{}'.format(code, orgId),
            'searchkey': '',
            'secid': '',
            'category': 'category_ndbg_szsh',
            'trade': '',
            'seDate': '2016-12-01~2023-12-18',
            'sortName': '',
            'sortType': '',
            'isHLtitle': 'true'}
    # BUGFIX: the original hard-coded 'Content-Length: 70', which is wrong
    # for this body; requests computes the correct length when the header
    # is omitted.
    hd = {'Host': "www.cninfo.com.cn",
          'Origin': 'http://www.cninfo.com.cn',
          'Pragma': 'no-cache',
          'Accept-Encoding': 'gzip,deflate',
          'Connection': 'keep-alive',
          'User-Agent': 'Mozilla/5.0(Windows NT 10.0;Win64;x64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome '
                        '/75.0.3770.100Safari / 537.36',
          'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
          'Accept': 'application/json,text/plain,*/*',
          'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'}
    r = requests.post(url, headers=hd, data=parse.urlencode(data))
    r_str = str(r.content, encoding='utf-8')
    r.close()
    sleep(2)  # be polite between requests
    reports_list = json.loads(r_str)['announcements']
    file_list = []
    for report in reports_list:
        title = report['announcementTitle']
        # Skip summaries (摘要), H-share versions, cancelled (已取消)
        # reports, 2015 reports, and titles without a year at all.
        if ('摘要' in title or '20' not in title or 'H' in title
                or '已取消' in title or '2015' in title):
            continue
        if zwjc not in company_names:
            continue  # defensive: caller should already have filtered
        pdf_url = 'http://static.cninfo.com.cn/' + report['adjunctUrl']
        file_name = code + '-' + zwjc + '-' + title
        print('正在下载：' + pdf_url, '存放在当前目录：//' + file_name)
        file_list.append(file_name)
        download_PDF(pdf_url, file_name, zwjc, code)
        sleep(2)
    return file_list


def load_pdf(file_name, code, zwjc):
    """Return the absolute path (without extension) of *file_name* inside
    the per-company directory ``<code><zwjc>`` under the current working
    directory; callers append '.pdf' / '.txt' to it.

    BUGFIX: the original concatenated '\\\\' by hand, producing broken
    paths on non-Windows systems; os.path.join yields the same result on
    Windows and the correct separator elsewhere.
    """
    folder = os.path.join(os.getcwd(), code + zwjc)
    print(folder)
    return os.path.join(folder, file_name)


def parsePDF(pdf_path, txt_path):
    """Extract all horizontal text boxes from the PDF at *pdf_path* and
    append their text (one trailing newline per box) to *txt_path*.

    Uses pdfminer's layout analysis; non-text elements (images, curves,
    figures) are ignored. Returns None.
    """
    resource = PDFResourceManager()
    device = PDFPageAggregator(resource, laparams=LAParams())
    interpreter = PDFPageInterpreter(resource, device)
    # BUGFIX: the original never closed the PDF handle and re-opened the
    # output file in append mode once per text box; open both once and
    # let `with` close them. The original's page/image/curve/figure
    # counters were dead code (only ever printed in commented-out lines)
    # and are dropped.
    with open(pdf_path, 'rb') as fp, \
            open(txt_path, 'a', encoding='UTF-8', errors='ignore') as out:
        parser = PDFParser(fp)
        doc = PDFDocument(parser=parser)
        parser.set_document(doc=doc)
        for page in PDFPage.get_pages(fp):
            interpreter.process_page(page)
            for element in device.get_result():
                if isinstance(element, LTTextBoxHorizontal):
                    out.write(element.get_text() + '\n')


# Read every .txt file under a folder and tally keyword frequencies.
def matchKeyWords(txt_folder, keyWords):
    """Count occurrences of each word in *keyWords* inside every .txt
    report under *txt_folder*, then append one row per report to the
    results workbook in the current directory.

    Row layout: col 0 stock code, col 1 stock name, col 2 year, then one
    count per keyword starting at col 3. File names are expected to be
    ``<code>-<name>-<title>`` as produced by get_PDF().
    """
    words_num = []
    for file in os.listdir(txt_folder):
        if os.path.splitext(file)[-1] != '.txt':
            continue
        txt_path = os.path.join(txt_folder, file)
        with open(txt_path, 'r', encoding='utf-8', errors='ignore') as fp:
            # Join all lines into one string so str.count sees the whole text.
            alltext = fp.read().replace('\n', '')
        word_freq = {word: alltext.count(word) for word in keyWords}
        parts = file.split('-')
        stock_code = parts[0]
        stock_name = parts[1]
        year = parts[2][0:4]  # title starts with the report year, e.g. '2019...'
        print('stock_code: ' + str(stock_code))
        print('stock_name: ' + str(stock_name))
        print('year: ' + str(year))
        words_num.append((word_freq, stock_code, stock_name, year))
        print('words_num: ' + str(words_num))

    xls_path = os.path.join(os.getcwd(), '2016-2021年中国A股上市公司的数字化转型关键词词频数.xls')
    workbook = xlrd.open_workbook(xls_path)
    print(xls_path)
    sheet = workbook.sheet_by_index(0)
    rows = sheet.nrows  # number of rows already present
    new_workbook = copy(workbook)  # xlrd is read-only; edit a writable copy
    new_worksheet = new_workbook.get_sheet(0)

    # (Re)write the keyword header row; keywords start at column 3, after
    # code/name/year. BUGFIX: the original wrote headers at column i+3 but
    # data at column ind+4, shifting every count one column right of its
    # header; both now use offset 3. (Also: the header loop used the
    # module-level `keywords` while the data loop used the `keyWords`
    # parameter; both now use the parameter.)
    for i, word in enumerate(keyWords):
        new_worksheet.write(0, i + 3, word)

    for index, (word_f, stock_code, stock_name, year) in enumerate(words_num):
        # NOTE(review): rows + index + 1 leaves one blank row between runs;
        # kept as-is since downstream consumers may rely on it — confirm.
        new_worksheet.write(rows + index + 1, 0, stock_code)
        new_worksheet.write(rows + index + 1, 1, stock_name)
        new_worksheet.write(rows + index + 1, 2, year)
        for ind, word in enumerate(keyWords):
            new_worksheet.write(rows + index + 1, ind + 3, str(word_f[word]))

    new_workbook.save(xls_path)
    # else:


def get_company_names():
    """Populate the module-level ``company_names`` list from column 1 of
    the first sheet of ./2022.xls, skipping the header row.
    """
    global company_names
    workbook = xlrd.open_workbook('./2022.xls')
    sheet = workbook.sheet_by_index(0)
    company_names = [str(sheet.cell_value(row, 1))
                     for row in range(1, sheet.nrows)]


if __name__ == '__main__':
    # Load the target company list first; get_adress() filters against it.
    get_company_names()
    # NOTE(review): this threading attempt was broken anyway — it CALLED
    # get_adress() instead of passing it as target=; kept commented out.
    # t1 = threading.Thread(get_adress())
    # t2 = threading.Thread(get_adress())
    # t1.start()
    # t2.start()
    get_adress()
    print('All done!')
# okay decompiling pachong.pyc
