import pymysql
import json
import re
import requests


# Browser-session cookies captured from quote.eastmoney.com via dev tools
# (values are URL-encoded exactly as the browser sent them).
cookies = dict(
    qgqp_b_id='cfa856f4432815c73fe9084c9032b744',
    HAList='ty-1-000300-%u6CAA%u6DF1300%2Cty-1-000001-%u4E0A%u8BC1%u6307%u6570',
    emshistory='%5B%22%E6%B2%AA300%22%2C%22%E5%A4%A7%E5%AE%97%E4%BA%A4%E6%98%93%22%5D',
    st_si='66286553069740',
    st_asi='delete',
    st_pvi='46983465161335',
    st_sp='2023-12-05%2009%3A23%3A48',
    st_inirUrl='https%3A%2F%2Flink.zhihu.com%2F',
    st_sn='7',
    st_psi='20240318210003109-113200301324-3261608991',
)

# Request headers copied from a real browser session.  The Cookie header is
# deliberately NOT set here: the cookies are passed via requests' cookies=
# argument instead (note: requests serializes cookies= alphabetically).
headers = dict([
    ('Accept', '*/*'),
    ('Accept-Language', 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6'),
    ('Connection', 'keep-alive'),
    ('Referer', 'https://quote.eastmoney.com/zs000300.html'),
    ('Sec-Fetch-Dest', 'script'),
    ('Sec-Fetch-Mode', 'no-cors'),
    ('Sec-Fetch-Site', 'same-site'),
    ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0'),
    ('sec-ch-ua', '"Chromium";v="122", "Not(A:Brand";v="24", "Microsoft Edge";v="122"'),
    ('sec-ch-ua-mobile', '?0'),
    ('sec-ch-ua-platform', '"Windows"'),
])

# Query-string parameters for the kline endpoint, captured from the browser.
params = dict(
    cb='jQuery351015002031282628914_1710766802562',  # JSONP callback name the server wraps the payload in
    secid='1.000300',  # presumably market 1 + index code 000300 (CSI 300) -- confirm
    ut='fa5fd1943c7b386f172d6893dbfba10b',  # opaque API token copied from the browser request
    fields1='f1,f2,f3,f4,f5,f6',
    fields2='f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61',
    klt='101',  # kline period; presumably 101 = daily bars -- confirm
    fqt='1',  # presumably price-adjustment (fuquan) flag -- confirm
    end='20500101',  # far-future end date so the most recent bars are included
    lmt='120',  # row limit
    _='1710766802589',  # cache-busting timestamp
)

# Fetch kline data for secid 1.000300 as a JSONP-wrapped payload.
# timeout= keeps the script from hanging forever on a stalled connection
# (the original call had no timeout, which blocks indefinitely on a dead peer).
resp = requests.get(
    'https://push2his.eastmoney.com/api/qt/stock/kline/get',
    params=params,
    cookies=cookies,
    headers=headers,
    timeout=10,
)


# Post-process the scraped data as JSON (via re.findall or re.search) -- done.
# Save the data to the database; two approaches were considered:
#     1. Example rows: 2024-03-13,3594.42,3572.36,3594.42,3563.37,144590590,266898756379.80,0.86,-0.70,-25.13,0.47
#             2024-03-14,3571.79,3562.22,3597.46,3545.61,140962722,263803609335.20,1.45,-0.28,-10.14,0.46
#             2024-03-15,3554.91,3543.49,3566.48,3533.68,67909073,126442629023.20,0.92,-0.53,-18.73,0.22
#             Split each iterated row on the comma delimiter to obtain a list,
#             then walk the list and store each field in its own column.
#             data_everyday = "2024-03-13,3594.42,3572.36,3594.42,3563.37,144590590,266898756379.80,0.86,-0.70,-25.13,0.47"
#             data_list = data_everyday.split(",")
#             returns data_list=["2024-03-13","3594.42","3572.36","3594.42","3563.37","144590590","266898756379.80","0.86","-0.70","-25.13","0.47"]
#             for data in data_list:
#                   sql = "insert into db1(title, inq, rating_num.....) values(%s, %s, %s.....)"


# The response body is JSONP: callbackName({...});  Extract the JSON object
# between the outermost parentheses.
json_data_match = re.search(r'\(({.*})\)', resp.text)
json_data = json.loads(json_data_match.group(1)) if json_data_match else None

# Pull the kline rows (one comma-separated string per trading day, e.g.
# "2024-03-13,3594.42,...").  Use guarded .get() access so a failed request,
# an unmatched regex, or a NULL "data" field yields None instead of raising
# TypeError/KeyError (the original json_data['data']['klines'] crashed when
# the API returned "data": null).
klines_data = (json_data.get('data') or {}).get('klines') if json_data else None

# Print each daily row; previously iterating a None klines_data raised
# "TypeError: 'NoneType' object is not iterable".
if klines_data:
    for data in klines_data:
        print(data)
else:
    print('No kline data extracted from the response')