# -*- coding: utf-8 -*-
# @Time : 2021/8/11 21:25
# @Author : 龙王赘婿_彪少
# @File : caobao.py
# @Software: PyCharm

# 爬取网页财报

# 正则表达式.Beautiful Soup.Lxml都是解析网页用的
import urllib.request, urllib.error
from lxml import html
from bs4 import BeautifulSoup  # 网页解析
import os
import re
import requests
import time
import json
import openpyxl


# 这个标头是防止反爬的，
# 1.headers伪装，服务器使用这个判断你使用的浏览器，当有这个的时候，服务器会认为是浏览器正常的请求。
# 2.Referer：浏览器通过此来判断你从哪一个网页跳转过来。
# 3.Cookie伪装，cookie是服务器用来辨别你此时的状态的，比如你已经登录啊，什么的，每一次向服务器请求cookie都会随之更新。
def header_plus():
    """Return a richer request-header dict for evading anti-crawler checks.

    Combines a browser User-Agent, a session cookie and a Referer so the
    server treats the request as a normal browser visit.

    Returns:
        dict: HTTP header fields suitable for urllib / requests.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
        'cookie': 'Hm_lvt_cb7f29be3c304cd3bb0c65a4faa96c30=1626761071; Hm_lpvt_cb7f29be3c304cd3bb0c65a4faa96c30=1627519587',
        # Fixed: the original value 'http: // www.sky - robotics.cn /' had stray
        # spaces and was not a valid URL, so servers would ignore the Referer.
        'Referer': 'http://www.sky-robotics.cn/',
    }
    return headers


# 这个标头是简单的，仅告诉浏览器类型
def header():
    """Return a minimal request-header dict that only declares the browser type."""
    user_agent = (
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    )
    return {'user-agent': user_agent}


'''
Function:   getHtml
Args:       url -- URL string of the page to request
Returns:    content -- the page body decoded as UTF-8, or "" on failure
Purpose:    Fetch a web page with browser-like request headers.
'''


def getHtml(url, headers=None):
    """Fetch *url* via HTTP GET and return the body decoded as UTF-8.

    Args:
        url: The URL to request.
        headers: Optional dict of request headers. Defaults to the simple
            browser-identifying headers from ``header()`` (backward
            compatible with the original single-argument call).

    Returns:
        str: The decoded page content, or "" when the request failed
        (the HTTP code / reason is printed, matching the original behavior).
    """
    if headers is None:
        headers = header()
    # Package the request with the given headers.
    request = urllib.request.Request(url=url, headers=headers, method="GET")
    # Renamed from `html` to avoid shadowing the module-level lxml.html import.
    page = ""
    try:
        # Context manager closes the response even if read/decode raises
        # (the original leaked the socket).
        with urllib.request.urlopen(request, timeout=10) as resource:
            page = resource.read().decode("utf-8")
    # Report request failures without crashing the caller.
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return page


if __name__ == '__main__':
    print('begin')

    # (column title, JSON field it is filled from). The first column (序号)
    # is a running row number and has no source field.
    COLUMNS = [
        ("序号", None),
        ("股票代码", "SECURITY_CODE"),
        ("股票简称", "SECURITY_NAME_ABBR"),
        ("每股收益(元)", "BASIC_EPS"),
        ("营业收入(元)", "TOTAL_OPERATE_INCOME"),
        ("同比增长(%)", "YSTZ"),
        ("季度环比增长(%)", "YSHZ"),
        ("净利润(元)", "PARENT_NETPROFIT"),
        ("同比增长(%)", "SJLTZ"),
        ("季度环比增长(%)", "SJLHZ"),
        ("每股净资产(元)", "BPS"),
        ("净资产收益率(%)", "WEIGHTAVG_ROE"),
        ("每股经营现金流量(元)", "MGJYXJJE"),
        ("销售毛利率(%)", "XSMLL"),
        ("利润分配", "ASSIGNDSCRPT"),
        ("所处行业", "PUBLISHNAME"),
        ("最新公告日期", "UPDATE_DATE"),
    ]

    # Build the workbook and write the title row (row 1).
    wb = openpyxl.Workbook()
    Sheet = wb.create_sheet("caibao", 0)
    for col, (title, _field) in enumerate(COLUMNS, start=1):
        Sheet.cell(1, col, value=title)

    # The API answers with JSONP: jQuery<digits>_<digits>(<json>);
    # Compile the unwrapping pattern once instead of per page.
    jsonp_pat = re.compile(r"jQuery\d+_\d+\((.+?)\);")

    i = 0  # running record counter across all pages
    print('开始爬取网页')
    # Pages 1..30, 50 records per page, half-year reports dated 2021-06-30.
    for j in range(1, 31):
        print('第' + str(j) + '页')
        # Small delay between requests to avoid tripping anti-crawler limits.
        time.sleep(0.01)

        url = 'http://datacenter-web.eastmoney.com/api/data/get?callback=jQuery11230722601848913554_1628873201738&st=UPDATE_DATE%2CSECURITY_CODE&sr=-1%2C-1&ps=50&p='+ str(j) + '&type=RPT_LICO_FN_CPD&sty=ALL&token=894050c76af8597a853f5b408b759f5d&filter=(REPORTDATE%3D%272021-06-30%27)'

        html = getHtml(url)

        # Strip the jQuery callback wrapper, then parse the embedded JSON.
        data = jsonp_pat.findall(html)
        jsonObj = json.loads(data[0])
        mydata = jsonObj['result']['data']

        for record in mydata:
            print("第" + str(i + 1) + "个")
            row = i + 2  # row 1 holds the column titles
            Sheet.cell(row, 1, value=i + 1)
            for col, (_title, field) in enumerate(COLUMNS[1:], start=2):
                Sheet.cell(row, col, value=str(record[field]))
            i = i + 1

    wb.save("毛辉小姐姐报表.xlsx")

    print("end")
