import requests
from bs4 import BeautifulSoup
import json
import time
import random
import re
from urllib.parse import urljoin, quote, urlparse
import os
import logging
from typing import List, Dict, Optional, Tuple
from fake_useragent import UserAgent

# Logging: INFO level, mirrored to a log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        # Explicit UTF-8 so the Chinese log messages survive on platforms
        # whose default locale encoding is not UTF-8 (e.g. Windows/GBK).
        logging.FileHandler("imt_crawler.log", encoding='utf-8'),
        logging.StreamHandler()
    ]
)

class IMTLinkCrawler:
    """Crawler for www.imtlink.com.

    Provides retrying HTTP access with rotating User-Agents, plus scrapers
    for the homepage, category lists, category pages, detail pages and the
    site search, and a helper to persist results under ./data.
    """

    def __init__(self):
        self.base_url = "https://www.imtlink.com/"
        self.session = requests.Session()
        self.ua = UserAgent()  # source of random User-Agent strings
        self.update_headers()
        self.retry_count = 3   # attempts per URL before giving up
        self.timeout = 20      # per-request timeout, seconds

    def update_headers(self):
        """Install browser-like headers with a fresh random User-Agent."""
        self.session.headers.update({
            'User-Agent': self.ua.random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
        })

    def make_request(self, url: str, params: Dict = None, method: str = "GET") -> Optional["requests.Response"]:
        """Send an HTTP request with retries and linear backoff.

        Args:
            url: Target URL.
            params: Query parameters (GET) or form data (POST).
            method: "GET" (default); anything else is sent as POST.

        Returns:
            The successful Response, or None when every attempt failed.
        """
        for attempt in range(self.retry_count):
            try:
                self.update_headers()  # rotate the UA on every attempt
                if method.upper() == "GET":
                    response = self.session.get(url, params=params, timeout=self.timeout)
                else:
                    response = self.session.post(url, data=params, timeout=self.timeout)

                response.raise_for_status()
                logging.info(f"成功获取: {url} (尝试 {attempt + 1})")
                return response

            except requests.exceptions.RequestException as e:
                logging.warning(f"请求失败 {url} (尝试 {attempt + 1}): {e}")
                if attempt < self.retry_count - 1:
                    # Backoff grows with the attempt number, plus jitter.
                    sleep_time = (attempt + 1) * 2 + random.uniform(0, 1)
                    logging.info(f"等待 {sleep_time:.2f} 秒后重试...")
                    time.sleep(sleep_time)
                else:
                    logging.error(f"所有重试失败: {url}")
        return None  # explicit: all retries exhausted

    def fetch_page(self, url: str, params: Dict = None) -> Optional[str]:
        """Fetch a URL and return its decoded body text, or None on failure.

        BUGFIX: the previous implementation looped over candidate encodings,
        but assigning ``response.encoding`` never raises, so the bare-except
        loop always returned after the first ('utf-8') iteration and the
        fallback encodings were dead code.  Use the charset detected by
        requests (``apparent_encoding``) instead.
        """
        response = self.make_request(url, params)
        if response is None:
            return None
        response.encoding = response.apparent_encoding or 'utf-8'
        return response.text

    def is_valid_url(self, url: str) -> bool:
        """Return True for relative URLs and absolute URLs on this site."""
        if not url or url.startswith(('javascript:', 'mailto:')):
            return False

        parsed = urlparse(url)
        if not parsed.netloc:  # relative URL: always acceptable
            return True

        # Accept only the crawl target's host and its subdomains.
        base_domain = urlparse(self.base_url).netloc
        return parsed.netloc == base_domain or parsed.netloc.endswith('.' + base_domain)

    def normalize_url(self, url: str) -> str:
        """Absolutize a URL and strip its fragment and tracking parameters.

        Only the query parameters ``id``, ``page``, ``category`` and ``type``
        are kept.  BUGFIX: the old substring test (``'id=' in param``) also
        matched unrelated keys such as ``vid=``; compare the parameter name
        exactly instead.
        """
        if not url:
            return ""

        # Resolve relative URLs against the site root.
        if not url.startswith(('http://', 'https://')):
            url = urljoin(self.base_url, url)

        # Rebuilding from parts drops the fragment automatically.
        parsed = urlparse(url)
        clean_url = f"{parsed.scheme}://{parsed.netloc}{parsed.path}"
        if parsed.query:
            kept = [
                param for param in parsed.query.split('&')
                if param.split('=', 1)[0] in ('id', 'page', 'category', 'type')
            ]
            if kept:
                clean_url += '?' + '&'.join(kept)

        return clean_url

    def extract_links(self, html: str, base_url: str) -> List[str]:
        """Collect unique, normalized same-site links from an HTML document.

        ``base_url`` is kept for interface compatibility; resolution
        currently goes through ``normalize_url`` (i.e. ``self.base_url``).
        """
        if not html:
            return []

        soup = BeautifulSoup(html, 'html.parser')
        links = set()

        # Regular anchors.
        for a_tag in soup.find_all('a', href=True):
            href = a_tag['href'].strip()
            if self.is_valid_url(href):
                links.add(self.normalize_url(href))

        # Links stashed in data-* attributes (JS-driven navigation).
        for element in soup.select('[data-href], [data-url], [data-link]'):
            for attr in ('data-href', 'data-url', 'data-link'):
                if attr in element.attrs:
                    href = element[attr].strip()
                    if self.is_valid_url(href):
                        links.add(self.normalize_url(href))

        return list(links)

    def get_homepage(self) -> Dict:
        """Scrape the homepage: title, navigation, featured and latest items."""
        logging.info("正在获取首页内容...")
        html = self.fetch_page(self.base_url)
        if not html:
            return {"error": "无法获取首页内容"}

        soup = BeautifulSoup(html, 'html.parser')
        result = {
            "title": soup.title.get_text().strip() if soup.title else "无标题",
            "url": self.base_url,
            "timestamp": time.strftime('%Y-%m-%d %H:%M:%S'),
            "navigation": [],
            "featured_content": [],
            "latest_updates": []
        }

        # Navigation menu: try a range of common selectors.
        nav_selectors = [
            'nav', '.navbar', '.menu', '.navigation',
            'header ul', '.header ul', '#menu', '.main-menu'
        ]
        for selector in nav_selectors:
            for nav in soup.select(selector):
                for link in nav.select('a[href]'):
                    href = link['href']
                    if self.is_valid_url(href):
                        result["navigation"].append({
                            "text": link.get_text().strip(),
                            "url": self.normalize_url(href)
                        })

        # Featured content (carousels, banners, recommendations).
        carousel_selectors = [
            '.carousel', '.slider', '.banner', '.swiper',
            '.featured', '.recommended', '.hot'
        ]
        for selector in carousel_selectors:
            for element in soup.select(selector):
                for item in element.select('.item, .slide, .banner-item'):
                    link = item.find('a')
                    if link and link.get('href'):
                        img = link.find('img')  # hoisted: was looked up three times
                        result["featured_content"].append({
                            "title": link.get('title', '') or (img and img.get('alt', '')) or "",
                            "url": self.normalize_url(link['href']),
                            # .get avoids a KeyError on <img> tags without src
                            "image": img.get('src', '') if img else ""
                        })

        # Latest updates.
        update_selectors = [
            '.new', '.latest', '.update', '.recent',
            '.video-list', '.post-list', '.content-list'
        ]
        for selector in update_selectors:
            for element in soup.select(selector):
                for item in element.select('.item, .post, .video, article'):
                    link = item.find('a')
                    if link and link.get('href'):
                        title = item.select_one('h1, h2, h3, h4, .title, .name')
                        img = item.find('img')
                        posted = item.select_one('.time, .date')  # hoisted duplicate lookup
                        result["latest_updates"].append({
                            "title": title.get_text().strip() if title else "",
                            "url": self.normalize_url(link['href']),
                            "image": img.get('src', '') if img else "",
                            "time": posted.get_text().strip() if posted else ""
                        })

        # De-duplicate every section by URL (later entries win).
        for section in ("navigation", "featured_content", "latest_updates"):
            result[section] = list({item["url"]: item for item in result[section]}.values())

        return result

    def get_categories(self) -> List[Dict]:
        """Derive category links from the navigation, with a URL-pattern fallback."""
        logging.info("正在获取分类列表...")
        homepage = self.get_homepage()

        # First pass: navigation entries whose URL or label looks like a category.
        categories = []
        for item in homepage.get("navigation", []):
            url = item["url"]
            text = item["text"]
            if any(keyword in url.lower() or keyword in text.lower()
                  for keyword in ['category', 'type', 'genre', '分类', '类别', '类型']):
                categories.append({
                    "name": text,
                    "url": url,
                    "slug": url.split('/')[-1] or text
                })

        # Fallback: scan every homepage link for common category path patterns.
        if not categories:
            common_category_paths = [
                '/category/', '/categories/', '/type/', '/genre/',
                '/list/', '/channel/', '/sort/', '/class/'
            ]
            html = self.fetch_page(self.base_url)
            if html:
                all_links = self.extract_links(html, self.base_url)
                for link in all_links:
                    for path in common_category_paths:
                        if path in link:
                            # Derive a human-readable name from the URL slug.
                            parts = link.split('/')
                            name = parts[-2] if parts[-1] == '' else parts[-1]
                            categories.append({
                                "name": name.replace('-', ' ').title(),
                                "url": link,
                                "slug": name
                            })
                            break

        return categories

    def get_category_content(self, category_url: str, page: int = 1) -> Dict:
        """Scrape one page of a category listing.

        Returns a dict with the items found, a ``has_next`` flag and
        ``total_count`` (number of items on this page).
        """
        logging.info(f"正在获取分类内容: {category_url}, 第{page}页")

        # Append ?page=N / &page=N for pages beyond the first.
        paginated_url = category_url
        if page > 1:
            separator = '&' if '?' in category_url else '?'
            paginated_url += f'{separator}page={page}'

        html = self.fetch_page(paginated_url)
        if not html:
            return {"error": f"无法获取分类内容: {category_url}"}

        soup = BeautifulSoup(html, 'html.parser')
        result = {
            "category_url": category_url,
            "page": page,
            "items": [],
            "has_next": False,
            "total_count": 0
        }

        # Try selectors until one matches; only the first match is used.
        content_selectors = [
            '.video-list .item', '.post-list .item', '.content-item',
            '.movie-item', '.article', '.product', '.card',
            '.list .item', '.items .item', '.grid-item'
        ]
        for selector in content_selectors:
            items = soup.select(selector)
            if items:
                for item in items:
                    link = item.find('a')
                    if not link or not link.get('href'):
                        continue

                    title = item.select_one('h1, h2, h3, h4, .title, .name')
                    image = item.find('img')
                    desc = item.select_one('.desc, .description, .excerpt')  # hoisted duplicate lookup

                    result["items"].append({
                        "title": title.get_text().strip() if title else "",
                        "url": self.normalize_url(link['href']),
                        "image": image['src'] if image and image.get('src') else "",
                        "description": desc.get_text().strip() if desc else "",
                        "meta": {}
                    })
                break

        # Detect a next-page control.
        next_selectors = ['.next', '.next-page', '.pagination-next', 'a[rel=next]']
        for selector in next_selectors:
            next_link = soup.select_one(selector)
            if next_link and next_link.get('href'):
                result["has_next"] = True
                break

        result["total_count"] = len(result["items"])
        return result

    def get_detail(self, detail_url: str) -> Dict:
        """Scrape a detail page: title, body text, images, metadata, related links."""
        logging.info(f"正在获取详情: {detail_url}")
        html = self.fetch_page(detail_url)
        if not html:
            return {"error": f"无法获取详情页: {detail_url}"}

        soup = BeautifulSoup(html, 'html.parser')
        result = {
            "url": detail_url,
            "title": "",
            "content": "",
            "images": [],
            "metadata": {},
            "related_links": []
        }

        # Title: first selector that matches wins.
        for selector in ['h1', '.title', '.post-title', '.product-title', '.entry-title']:
            title = soup.select_one(selector)
            if title:
                result["title"] = title.get_text().strip()
                break

        # Main body text: first selector that matches wins.
        for selector in ['.content', '.entry-content', '.post-content', '.description', '.detail']:
            content = soup.select_one(selector)
            if content:
                result["content"] = content.get_text().strip()
                break

        # Every image on the page, normalized to an absolute URL.
        for img in soup.select('img'):
            if img.get('src'):
                result["images"].append(self.normalize_url(img['src']))

        # Key/value metadata blocks.
        for selector in ['.meta', '.info', '.details', '.attributes']:
            for meta in soup.select(selector):
                for item in meta.select('.meta-item, .info-item, .attr'):
                    key = item.select_one('.key, .label, .name')
                    value = item.select_one('.value, .content')
                    if key and value:
                        result["metadata"][key.get_text().strip()] = value.get_text().strip()

        # Related/recommended links (same-site only).
        for selector in ['.related', '.similar', '.recommendations']:
            for related in soup.select(selector):
                for link in related.select('a[href]'):
                    if self.is_valid_url(link['href']):
                        result["related_links"].append({
                            "text": link.get_text().strip(),
                            "url": self.normalize_url(link['href'])
                        })

        return result

    def search(self, keyword: str, page: int = 1) -> Dict:
        """Search the site for ``keyword``, trying common endpoints in turn.

        Stops at the first endpoint whose response yields parseable results.
        """
        logging.info(f"正在搜索: {keyword}, 第{page}页")

        search_endpoints = [
            '/search', '/search.php', '/search.html',
            '/s', '/find', '/query', '/searchresult'
        ]

        results = {"keyword": keyword, "page": page, "results": [], "total": 0}

        for endpoint in search_endpoints:
            search_url = urljoin(self.base_url, endpoint)
            # Send the keyword under every common parameter name at once.
            params = {'q': keyword, 'keyword': keyword, 's': keyword, 'search': keyword}
            if page > 1:
                params['page'] = page

            html = self.fetch_page(search_url, params)
            if not html:
                continue

            soup = BeautifulSoup(html, 'html.parser')

            result_selectors = [
                '.search-result', '.result-item', '.search-item',
                '.item', '.product', '.post', '.video'
            ]
            for selector in result_selectors:
                items = soup.select(selector)
                if items:
                    for item in items:
                        link = item.find('a')
                        if not link or not link.get('href'):
                            continue

                        title = item.select_one('h1, h2, h3, h4, .title, .name')
                        text = item.get_text().strip()  # hoisted: was computed three times
                        result_item = {
                            "title": title.get_text().strip() if title else "",
                            "url": self.normalize_url(link['href']),
                            "snippet": text[:200] + "..." if len(text) > 200 else text
                        }

                        image = item.find('img')
                        if image and image.get('src'):
                            result_item["image"] = self.normalize_url(image['src'])

                        results["results"].append(result_item)

                    results["total"] = len(results["results"])
                    return results

        return results

    def save_data(self, data, filename: str) -> bool:
        """Persist ``data`` under data/<filename> (.json or .txt).

        Returns True on success, False on write failure or an unsupported
        file extension.
        """
        os.makedirs('data', exist_ok=True)
        # BUGFIX: the path was previously hard-coded (the filename argument
        # was ignored), so every call overwrote the same file.
        filepath = os.path.join('data', filename)

        try:
            if filename.endswith('.json'):
                with open(filepath, 'w', encoding='utf-8') as f:
                    json.dump(data, f, ensure_ascii=False, indent=2)
            elif filename.endswith('.txt'):
                with open(filepath, 'w', encoding='utf-8') as f:
                    if isinstance(data, dict):
                        for key, value in data.items():
                            f.write(f"{key}: {value}\n")
                    else:
                        f.write(str(data))
            else:
                # BUGFIX: previously fell through and logged success even
                # though nothing was written.
                logging.error(f"保存文件失败 {filepath}: 不支持的文件类型")
                return False

            logging.info(f"数据已保存到: {filepath}")
            return True
        except Exception as e:
            logging.error(f"保存文件失败 {filepath}: {e}")
            return False

# Usage example
def main():
    """Demo run: homepage, categories, one category page, one detail, one search."""
    crawler = IMTLinkCrawler()

    # 1. Homepage
    homepage = crawler.get_homepage()
    crawler.save_data(homepage, 'homepage.json')

    # 2. Categories
    categories = crawler.get_categories()
    crawler.save_data(categories, 'categories.json')
    print(f"找到 {len(categories)} 个分类")

    # 3. Contents of the first category, if any were found
    first_category = categories[0] if categories else None
    if first_category is not None:
        category_content = crawler.get_category_content(first_category['url'])
        crawler.save_data(category_content, 'category_content.json')
        print(f"分类 '{first_category['name']}' 中找到 {len(category_content.get('items', []))} 个项目")

    # 4. Detail page for the first "latest update" on the homepage
    updates = homepage.get('latest_updates')
    if updates:
        detail = crawler.get_detail(updates[0]['url'])
        crawler.save_data(detail, 'detail.json')
        print(f"获取详情: {detail.get('title', '无标题')}")

    # 5. Search example
    search_results = crawler.search("电影")
    crawler.save_data(search_results, 'search_results.json')
    print(f"搜索到 {len(search_results.get('results', []))} 个结果")

if __name__ == "__main__":
    main()