import requests  # HTTP client used for fetching the Hacker News front page
from bs4 import BeautifulSoup  # HTML parsing
from datetime import datetime  # timestamps for export file paths
import os  # file and directory operations
from logger import LOG  # project logging module

class HackerNewsClient:
    """Fetch and export top stories from the Hacker News front page.

    Optionally routes requests through HTTP/HTTPS proxies when given a
    ``config`` object exposing ``proxy_enabled``, ``proxy_http`` and
    ``proxy_https`` attributes; falls back to a direct connection when the
    proxied request fails.
    """

    # Shared request headers, reused by both the main and fallback sessions.
    _HEADERS = {
        'User-Agent': 'IntelligentAnalyzer/1.0 (Hacker News Client)'
    }

    def __init__(self, config=None):
        self.url = 'https://news.ycombinator.com/'  # Hacker News front page
        self.config = config
        self.session = self._create_session()

    def _create_session(self):
        """Create a requests session, applying proxy settings from config when enabled."""
        session = requests.Session()
        session.headers.update(self._HEADERS)

        # Apply proxies only when the config explicitly enables them.
        if self.config and getattr(self.config, 'proxy_enabled', False):
            proxies = {}
            if getattr(self.config, 'proxy_http', None):
                proxies['http'] = self.config.proxy_http
                LOG.info(f"设置HTTP代理: {self.config.proxy_http}")
            if getattr(self.config, 'proxy_https', None):
                proxies['https'] = self.config.proxy_https
                LOG.info(f"设置HTTPS代理: {self.config.proxy_https}")

            if proxies:
                session.proxies.update(proxies)
                LOG.info("代理配置已应用到Hacker News客户端")

        return session

    def fetch_top_stories(self):
        """Fetch the front page and return the parsed stories.

        Returns:
            list[dict]: ``{'title': ..., 'link': ...}`` dicts; empty list on failure.
        """
        LOG.debug("准备获取Hacker News的热门新闻。")
        try:
            # Generous timeout to accommodate slow proxies.
            response = self.session.get(self.url, timeout=30)
            response.raise_for_status()
            LOG.info(f"成功连接到Hacker News，响应状态码: {response.status_code}")
            return self.parse_stories(response.text)
        # ProxyError is a ConnectionError subclass, so it must be caught first.
        except requests.exceptions.ProxyError as e:
            LOG.error(f"代理连接失败：{str(e)}")
            return self._fallback_fetch()
        except requests.exceptions.Timeout as e:
            LOG.error(f"连接超时：{str(e)}")
            return self._fallback_fetch()
        except requests.exceptions.ConnectionError as e:
            LOG.error(f"连接错误：{str(e)}")
            return self._fallback_fetch()
        except Exception as e:
            # Deliberate best-effort boundary: log and return an empty result.
            LOG.error(f"获取Hacker News的热门新闻失败：{str(e)}")
            return []

    def _fallback_fetch(self):
        """Retry without the configured proxy after a proxied request failed.

        Returns:
            list[dict]: Parsed stories, or an empty list if the direct
            connection also fails.

        NOTE(review): a fresh Session still honors HTTP(S)_PROXY environment
        variables (requests' trust_env default) — confirm whether the fallback
        should also set ``trust_env = False``.
        """
        LOG.warning("尝试不使用代理直接连接Hacker News...")
        try:
            # Temporary session with no explicit proxy configuration.
            temp_session = requests.Session()
            temp_session.headers.update(self._HEADERS)

            response = temp_session.get(self.url, timeout=15)
            response.raise_for_status()
            LOG.info("直接连接成功，已获取Hacker News数据")
            return self.parse_stories(response.text)
        except Exception as e:
            LOG.error(f"直接连接也失败：{str(e)}")
            return []

    def parse_stories(self, html_content):
        """Parse front-page HTML into a list of story dicts.

        Args:
            html_content: Raw HTML of the Hacker News front page.

        Returns:
            list[dict]: ``{'title': ..., 'link': ...}`` per story, with
            relative hrefs resolved against the site URL.
        """
        from urllib.parse import urljoin  # stdlib; only needed in this method

        LOG.debug("解析Hacker News的HTML内容。")
        soup = BeautifulSoup(html_content, 'html.parser')
        stories = soup.find_all('tr', class_='athing')  # one <tr> per story

        top_stories = []
        for story in stories:
            # Guard against rows missing the expected markup instead of
            # raising AttributeError and aborting the whole parse.
            titleline = story.find('span', class_='titleline')
            title_tag = titleline.find('a') if titleline else None
            if title_tag:
                title = title_tag.text
                # Ask HN / job posts use relative hrefs like "item?id=...";
                # resolve them so exported Markdown links are clickable.
                link = urljoin(self.url, title_tag['href'])
                top_stories.append({'title': title, 'link': link})

        LOG.info(f"成功解析 {len(top_stories)} 条Hacker News新闻。")
        return top_stories

    def export_top_stories(self, date=None, hour=None):
        """Fetch the top stories and write them to a Markdown file.

        Args:
            date: ``YYYY-MM-DD`` string; defaults to today.
            hour: ``HH`` string; defaults to the current hour.

        Returns:
            str | None: Path of the generated file, or None if no stories
            were fetched.
        """
        LOG.debug("准备导出Hacker News的热门新闻。")
        top_stories = self.fetch_top_stories()

        if not top_stories:
            LOG.warning("未找到任何Hacker News的新闻。")
            return None

        # Capture the clock once so date and hour cannot straddle midnight.
        now = datetime.now()
        if date is None:
            date = now.strftime('%Y-%m-%d')
        if hour is None:
            hour = now.strftime('%H')

        # Output layout: hacker_news/<date>/<hour>.md
        dir_path = os.path.join('hacker_news', date)
        os.makedirs(dir_path, exist_ok=True)

        file_path = os.path.join(dir_path, f'{hour}.md')
        with open(file_path, 'w', encoding='utf-8') as file:
            file.write(f"# Hacker News Top Stories ({date} {hour}:00)\n\n")
            for idx, story in enumerate(top_stories, start=1):
                file.write(f"{idx}. [{story['title']}]({story['link']})\n")

        LOG.info(f"Hacker News热门新闻文件生成：{file_path}")
        return file_path


if __name__ == "__main__":
    # Use the project configuration when available; without it, run the
    # client with its defaults (no proxy).
    try:
        from config import Config
        hn_client = HackerNewsClient(Config())
    except ImportError:
        hn_client = HackerNewsClient()

    hn_client.export_top_stories()  # defaults to the current date and hour
