# -*- coding: utf-8 -*-
# @Time: 2022/12/29 16:32
# @Author: foxhuty
# @File: stocks_spider.py
# @Software: PyCharm
# @Based on python 3.10

import csv
from sqlalchemy import create_engine
import pandas as pd
import httpx


def read_into_sql(file_name, tb_name, db_name='db_crawling_data'):
    """
    Load a crawled CSV file into a MySQL table.

    :param file_name: path of the CSV file to load
    :param tb_name: destination table name (dropped and recreated if it exists)
    :param db_name: database name, defaults to 'db_crawling_data'
    """
    engine = create_engine(f"mysql+pymysql://root:foxmmer@localhost/{db_name}")
    data = pd.read_csv(file_name)
    # Pass the engine itself so pandas acquires, commits and releases the
    # connection internally. The original opened a connection manually and
    # leaked it whenever read_csv/to_sql raised before conn.close().
    data.to_sql(name=tb_name, con=engine, index=False, if_exists='replace')


def read_from_mysql(tb_name, db_name='db_crawling_data'):
    """
    Read a whole table from MySQL and export it to an Excel file.

    The output goes to D:\\爬虫数据\\<tb_name>.xlsx with a 1-based '序号'
    (row number) index column; the sheet is named after the table.

    :param tb_name: table to read (also used as the sheet and file name)
    :param db_name: database name, defaults to 'db_crawling_data'
    """
    engine = create_engine(f'mysql+pymysql://root:foxmmer@localhost/{db_name}')
    # Context manager closes the connection even if read_sql raises
    # (the original leaked it on error).
    # NOTE(review): tb_name is interpolated into the SQL; it comes from a
    # local file name here, but never feed it untrusted input.
    with engine.connect() as conn:
        data = pd.read_sql(sql=f"select * from {tb_name}", con=conn)
    print(data.head())
    with pd.ExcelWriter(f'D:\\爬虫数据\\{tb_name}.xlsx') as writer:
        # Shift to 1-based row numbers for the exported '序号' column
        # (vectorized; the original rebuilt the index with a comprehension).
        data.index = data.index + 1
        data.to_excel(writer, sheet_name=tb_name, index_label='序号')


def _format_stock_row(data):
    """Map one quote dict from the xueqiu API to a CSV row dict.

    Prefixes positive changes with '+', appends '%' to percentage
    fields, and leaves a missing dividend yield as None.
    """
    chg = data['chg']
    if chg:
        chg = f'+{chg}' if float(chg) > 0 else str(chg)
    percent = data['percent']
    if percent:
        sign = '+' if float(percent) > 0 else ''
        percent = f'{sign}{percent}%'
    dividend = data['dividend_yield']
    dividend = f'{dividend}%' if dividend else None
    return {
        '股票代码': data['symbol'],
        '股票名称': data['name'],
        '当前价格': data['current'],
        '涨跌额': chg,
        '涨跌幅': percent,
        '年初至今': f"{data['current_year_percent']}%",
        '成交量': data['volume'],
        '成交额': data['amount'],
        '换手率': f"{data['turnover_rate']}%",
        '市盈率(TTM)': data['pe_ttm'],
        '股息率': dividend,
        '市值': data['market_capital'],
    }


def stocks_crawling(file_name):
    """
    Crawl Shanghai A-share quotes (4 pages) from xueqiu.com into a CSV
    file under D:\\爬虫数据, then load the CSV into MySQL and export the
    table back out as an Excel file.

    :param file_name: CSV file name (e.g. 'stocks_2023_1_4.csv'); its
        stem is reused as the MySQL table name.
    :raises httpx.HTTPStatusError: if any page request fails.
    """
    fieldnames = ['股票代码', '股票名称', '当前价格', '涨跌额', '涨跌幅', '年初至今',
                  '成交量', '成交额', '换手率', '市盈率(TTM)', '股息率', '市值']
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
    }
    # mode='w', not 'a': the header is written unconditionally on every
    # call, so appending would corrupt the CSV with duplicate header rows
    # on each re-run (and break the later pd.read_csv load).
    with open(f'D:\\爬虫数据\\{file_name}', mode='w', encoding='utf-8', newline='') as file_data:
        csv_writer = csv.DictWriter(file_data, fieldnames=fieldnames)
        csv_writer.writeheader()
        for page in range(1, 5):
            url = f"https://xueqiu.com/service/v5/stock/screener/quote/list?page={page}&size=30&order=desc&order_by=amount&exchange=CN&market=CN&type=sha&_=1650469926246"
            response = httpx.get(url, headers=headers)
            # Fail loudly on HTTP errors instead of crashing later on
            # an unexpected JSON payload.
            response.raise_for_status()
            for data in response.json()['data']['list']:
                row = _format_stock_row(data)
                csv_writer.writerow(row)
                print(*row.values())
    tb_name = file_name.split('.')[0]
    read_into_sql(f'D:\\爬虫数据\\{file_name}', tb_name)
    read_from_mysql(tb_name)


if __name__ == '__main__':
    # Run the full pipeline: crawl -> CSV -> MySQL -> Excel export.
    stocks_crawling('stocks_2023_1_4.csv')
