import re
import time
import logging
import os
import datetime
from datetime import timedelta
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup

from core.base_spider import BaseSpider
from config.settings import logger, BASE_DIR, HEADERS
import requests


class BTNewsSpider(BaseSpider):
    """
    兵团新闻爬虫
    """
    
    def __init__(self):
        """Delegate all setup to BaseSpider (self.logger is presumably
        initialized there — confirm against core.base_spider)."""
        super().__init__()
        
    def _parse_date_arg(self):
        """
        Return the target crawl date: yesterday (local time).
        """
        yesterday = datetime.date.today() - timedelta(days=1)
        return yesterday
        
    def _fetch(self, url, encoding=None):
        """
        Fetch a page and return its decoded HTML.

        Args:
            url: Absolute URL to request.
            encoding: Optional explicit response encoding; when None, the
                encoding detected by requests (``apparent_encoding``) is used.

        Returns:
            The page text, or "" on any network or HTTP failure (best-effort:
            errors are logged, never raised to the caller).
        """
        try:
            resp = requests.get(url, headers=HEADERS, timeout=15)
            # Fix: treat 4xx/5xx as failures instead of returning the
            # server's error page as if it were article content.
            resp.raise_for_status()
            resp.encoding = encoding or resp.apparent_encoding
            return resp.text
        except Exception as e:
            self.logger.error(f"获取网页内容失败 {url}: {str(e)}")
            return ""
            
    def _parse(self, url, target_date, encoding=None):
        """
        Parse a site index page and collect articles dated *target_date*.

        The two supported sites share the same scanning logic and differ only
        in CSS selectors, date extraction, date format and base URL, so the
        scan is factored into ``_collect_links``.

        Args:
            url: Index page URL; the host decides which site rules apply.
            target_date: ``datetime.date`` the article dates must equal.
            encoding: Optional page encoding forwarded to ``_fetch``.

        Returns:
            List of ``(title, absolute_url)`` tuples; empty when the fetch
            fails or the host is not recognized.
        """
        html = self._fetch(url, encoding)
        if not html:
            return []
        soup = BeautifulSoup(html, "lxml")
        if "huyangnet.cn" in url:
            return self._collect_links(
                soup,
                "div.center_page_in li, .listBox li, .news-list li",
                self._huyang_date,
                "%Y-%m-%d",
                "https://www.huyangnet.cn",
                target_date,
            )
        if "bt.chinanews.com.cn" in url:
            return self._collect_links(
                soup,
                "div#LB li, .newslist li, .list li",
                self._chinanews_date,
                "%Y.%m.%d",
                "https://bt.chinanews.com.cn",
                target_date,
            )
        return []

    @staticmethod
    def _huyang_date(li):
        """Date text of a huyangnet list item: first <span>, 'YYYY-MM-DD'."""
        span = li.find("span")
        return span.get_text(strip=True)[:10] if span else ""

    @staticmethod
    def _chinanews_date(li):
        """Date text of a chinanews list item: the ``.date`` element,
        'YYYY.MM.DD' (the leading character, apparently a bracket, is
        skipped by the [1:11] slice)."""
        date_span = li.find(class_="date")
        return date_span.get_text(strip=True)[1:11] if date_span else ""

    def _collect_links(self, soup, selector, date_of, date_fmt, base_url, target_date):
        """Shared index scan: keep <li> items whose date equals
        *target_date*; resolve relative hrefs against *base_url*.

        Items with an unparsable date or without an <a> tag are skipped
        (ValueError from strptime, TypeError from ``li.a["href"]``)."""
        found = []
        for li in soup.select(selector):
            try:
                if datetime.datetime.strptime(date_of(li), date_fmt).date() != target_date:
                    continue
                href = li.a["href"]
                title = li.a.get_text(strip=True)
                full_url = base_url + href if href.startswith("/") else href
            except (ValueError, TypeError):
                continue
            found.append((title, full_url))
        return found
        
    def _extract_body(self, url):
        """
        Extract the article body text from *url*.

        Tries, in order: the <meta name="description"> content; all <p>
        paragraphs inside a recognized content container; finally the first
        <p> anywhere on the page. Returns "" when nothing is found or on
        any error (logged, never raised).
        """
        try:
            page = BeautifulSoup(self._fetch(url), "lxml")

            # 1) Meta description, when present and non-empty.
            meta = page.find("meta", attrs={"name": "description"})
            if meta and meta.get("content"):
                return meta["content"].strip()

            # 2) Paragraphs inside a known article container.
            container = (
                page.find("div", class_="content")
                or page.find("div", id="content")
                or page.find("div", class_="article-content")
            )
            if container:
                paras = container.find_all("p")
                if paras:
                    return " ".join(p.get_text(strip=True) for p in paras)

            # 3) Fall back to the first paragraph on the page.
            first_p = page.find("p")
            if first_p:
                return first_p.get_text(strip=True)
            return ""
        except Exception as e:
            self.logger.error(f"提取正文内容失败 {url}: {str(e)}")
            return ""
            
    def crawl(self):
        """
        Crawl Bingtuan (XPCC) news published yesterday.

        Fetches the index pages of the configured sites, collects article
        links dated yesterday, extracts each article body, and appends the
        raw content to today's dump file under BASE_DIR/yuanshineirong.

        Returns:
            ``["无"]`` when no articles were found for the target date.
            NOTE(review): the success path ends here without a visible
            return — ``cleaned_lines`` is built but unused in this view,
            so the method likely continues past this chunk; confirm.
        """
        self.logger.info("开始获取兵团新闻...")
        target_date = self._parse_date_arg()
        # Site display name -> index page URL.
        urls_index = {
            "胡杨网": "http://www.huyangnet.cn/node_60259.html",
            "中新网兵团": "https://www.bt.chinanews.com.cn/bingtuan/index.shtml"
        }
        all_links = []
        for site, url in urls_index.items():
            # Force gb2312 decoding for the chinanews site; others rely on
            # requests' auto-detection inside _fetch.
            enc = "gb2312" if "bt.chinanews.com.cn" in url else None
            links = self._parse(url, target_date, enc)
            all_links.extend(links)
            self.logger.info(f"{site} 找到 {len(links)} 篇新闻")
        if not all_links:
            self.logger.info(f"未找到 {target_date} 的兵团文章")
            # Still append a section header so the daily dump file records
            # the "no news found today" outcome.
            today_date = datetime.date.today().strftime("%Y%m%d")
            raw_filename = os.path.join(BASE_DIR, "yuanshineirong", f"原始内容_{today_date}.txt")
            with open(raw_filename, 'a', encoding='utf-8') as raw_file:
                raw_file.write("\n\n======= 兵团新闻原始内容 =======\n\n")
                raw_file.write("今日未找到兵团新闻\n")
            return ["无"]
        
        # Destination for today's raw content. Opened in append mode —
        # presumably shared with other spiders writing the same daily file.
        today_date = datetime.date.today().strftime("%Y%m%d")
        raw_filename = os.path.join(BASE_DIR, "yuanshineirong", f"原始内容_{today_date}.txt")
        
        # Extract and append article bodies, de-duplicating identical texts.
        seen = set()
        cleaned_lines = []
        with open(raw_filename, 'a', encoding='utf-8') as raw_file:
            raw_file.write("\n\n======= 兵团新闻原始内容 =======\n\n")
            for title, link in all_links:
                body = self._extract_body(link)
                if body and body not in seen:
                    seen.add(body)
                    cleaned_lines.append(body)
                    # Write title, link and body for this article.
                    raw_file.write(f"{title}\n")
                    raw_file.write(f"链接: {link}\n")
                    raw_file.write(f"内容: {body}\n\n")
        
        self.logger.info("兵团新闻原始内容已保存")