#!/usr/bin/env python
# coding: utf-8

# In[1]:


import re
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import sparkapi

def extract_news_data(page_source):
    """
    Pull news records out of an Eastmoney search-result page's HTML.

    Returns a list of dicts with 'title', 'href', 'date' and 'abstract'
    keys; residual HTML tags inside the captured text are stripped.
    """
    # One regex per field of the news listing markup.
    patterns = {
        'title': r'<div class="news_item_t".*?><a href=".*?" target="_blank">(.*?)</a></div>',
        'href': r'<div class="news_item_t".*?><a href="(.*?)" target="_blank">.*?</a></div>',
        'date': r'<span class="news_item_time">(\d+-\d+-\d+ \d+:\d+:\d+) - </span>',
        'abstract': r'<div class="news_item_c"><span class="news_item_time">.*? - </span><span>(.*?)</span></div>',
    }

    # Collect every match for every field across the whole page.
    matches = {}
    for field, regex in patterns.items():
        matches[field] = re.findall(regex, page_source)

    # The fields may match a different number of times on a malformed
    # page; truncating to the shortest list keeps records index-aligned.
    record_count = min(len(found) for found in matches.values())

    records = []
    for idx in range(record_count):
        record = {}
        for field in patterns:
            # Strip any leftover inline tags from the captured text.
            record[field] = re.sub('<.*?>', '', matches[field][idx])
        records.append(record)
    return records


def get_company_news(stock_code, pages=None):
    """Scrape Eastmoney news search results for *stock_code*.

    Parameters:
        stock_code: the keyword (e.g. '601985.SH') used in the search URL.
        pages: maximum number of result pages to scrape; None means keep
            paging until no news is found or there is no next-page link.

    Returns:
        pd.DataFrame with one row per news item (title/href/date/abstract).
    """
    edge_options = webdriver.EdgeOptions()
    edge_options.add_argument('--headless')
    with webdriver.Edge(options=edge_options) as browser:
        news_url = f'https://so.eastmoney.com/news/s?keyword={stock_code}'
        browser.get(news_url)
        time.sleep(2)  # give the JS-rendered result list time to load
        all_news_data = []
        page_index = 0
        # BUG FIX: the original used range(pages or float('inf')), which
        # raises TypeError when pages is None/0 because range() needs an
        # integer. A counter-driven while loop handles "all pages" cleanly.
        while pages is None or page_index < pages:
            page_index += 1
            news_list = extract_news_data(browser.page_source)
            if not news_list:
                break
            all_news_data.extend(news_list)
            try:
                # find_element raises NoSuchElementException when the
                # next-page link is absent, so the except ends the loop.
                next_page_button = browser.find_element(By.CSS_SELECTOR, "a[title='下一页']")
                next_page_button.click()
                time.sleep(2)  # wait for the next page to render
            except Exception:
                break
        return pd.DataFrame(all_news_data)


def spark_api_sentiment_score(text, max_retries=3):
    """Score the sentiment of a news text via the iFlytek Spark API.

    Asks the model for a single number in [-1, 1] (three decimal places).
    Retries up to *max_retries* times when the reply cannot be parsed as
    a float; raises ValueError once every attempt has failed.
    """
    # NOTE(review): credentials are hardcoded in source; they should be
    # moved to environment variables / a config file before sharing.
    appid = "fc65a928"
    api_secret = "NjZjZDI3MjNmNmM1NjIwMTI2ODRiYThk"
    api_key = "24669cf3f4eecdc6d349169a6af347a0"
    domain = "generalv3.5"
    Spark_url = "wss://spark-api.xf-yun.com/v3.5/chat"

    # The system prompt pins the model to "one number only" output; the
    # user message repeats that instruction ahead of the news text.
    character = {"role": "system", "content": "你现在是财经新闻打分系统，擅长分析新闻对股价的评分，（从极度负面到极度正面，-1到1分），仅输出一个数字(保留小数点后3位)，不要输出其他文本内容"}
    news = {"role": "user", "content": '仅输出一个数字(保留小数点后3位)，不要输出其他文本内容' + text}
    question = [character, news]

    for attempt in range(max_retries):
        # sparkapi.main accumulates the model reply into sparkapi.answer.
        sparkapi.answer = ''
        sparkapi.main(appid, api_key, api_secret, Spark_url, domain, question)
        try:
            return float(sparkapi.answer)
        except ValueError:
            print(f"尝试 #{attempt+1}: 无法从API响应中提取有效评分: {sparkapi.answer}")

    raise ValueError(f"在{max_retries}次重试后，仍无法获取有效的评分。")

def sentiment_analysis(news_df):
    """Score every news title with the Spark API and aggregate by day.

    Mutates *news_df* in place (adds a 'score' column and normalizes
    'date' to a plain date) and returns a (news_df, daily_means) pair,
    where daily_means has one mean score per calendar date.
    """
    # In-place assignment: callers see the new columns on the frame
    # they passed in, matching the original behavior.
    per_title_scores = news_df['title'].apply(spark_api_sentiment_score)
    news_df['score'] = per_title_scores
    news_df['date'] = pd.to_datetime(news_df['date']).dt.date
    daily_means = news_df.groupby('date')['score'].mean().reset_index()
    return news_df, daily_means


# Driver: scrape 4 pages of news for 601985.SH, score them, and save both
# the per-article scores and the daily averages to separate Excel sheets.
news_df = get_company_news('601985.SH', 4)
all_scores, daily_scores = sentiment_analysis(news_df)
print(all_scores, daily_scores)
# 'D Scores' (daily averages) is read back in the next notebook cell.
with pd.ExcelWriter('score.xlsx') as writer:
    all_scores.to_excel(writer, sheet_name='A Scores', index=False)
    daily_scores.to_excel(writer, sheet_name='D Scores', index=False)


# In[2]:


import pandas as pd
import tushare as ts
# NOTE(review): hardcoded Tushare token; move it to an env var before sharing.
pro = ts.pro_api('08bc6e20b9899a8c5b561d8223e680b69a6a6c34112af69a32b857c6')  # fill in your token

# 1. Fetch and save the daily share-price data
data = pro.daily(ts_code='601985.SH', start_date='20240119', end_date='20240607')
data.to_excel('data.xlsx')  # persist to a local file

print('股价数据保存成功')

# 2. Read back the daily sentiment scores and the price data
score = pd.read_excel('score.xlsx',sheet_name='D Scores')
share = pd.read_excel('data.xlsx')
share = share[['trade_date', 'close']]  # keep only the columns we need

# Unify the date column name so the two frames can be merged on it
share.rename(columns={'trade_date': 'date'}, inplace=True)

# Inspect the dtypes before conversion
print('股价日期类型（调整前）:', share['date'].dtype)
print('评分日期类型（调整前）:', score['date'].dtype)

# Convert Tushare's YYYYMMDD trade dates to real datetimes
share['date'] = pd.to_datetime(share['date'].astype(str), format='%Y%m%d')

# Inspect the dtypes after conversion
print('股价日期类型（调整后）:', share['date'].dtype)
print('评分日期类型（调整后）:', score['date'].dtype)

# 3. Merge scores with prices (inner join keeps trading days only)
merged_data = pd.merge(score, share, on='date', how='inner')
merged_data.rename(columns={'close': 'price'}, inplace=True)

# Export the merged data for the plotting cell
merged_data.to_excel('all data.xlsx', index=False)
print('数据合并成功')


# In[3]:


import sys
import datetime
import pandas as pd
import matplotlib.pyplot as plt


plt.rcParams['figure.figsize'] = (8, 6)
# Use SimHei so CJK labels render, and keep the minus sign drawable.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Load the merged score/price table produced by the previous cell.
data = pd.read_excel('all data.xlsx')

# BUG FIX: the original loop copied the 'date' column element-by-element
# into itself without converting anything (a no-op that contradicted its
# own comment). Actually convert to datetime so the x-axis is a time axis.
data['date'] = pd.to_datetime(data['date'])

# Plot sentiment score (dashed, left axis) and price (red, right axis).
plt.plot(data['date'], data['score'], linestyle='--', label='分数')
plt.xticks(rotation=45)  # tilt x tick labels so dates do not overlap
plt.legend(loc='upper left')  # score legend in the upper-left corner
plt.twinx()  # second y-axis sharing the same x-axis
plt.plot(data['date'], data['price'], color='red', label='价格')
plt.xticks(rotation=45)
plt.legend(loc='upper right')
plt.show()


# In[ ]:
