#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
公司信息收集器
收集公司网站内容和招聘信息
"""

import json
import logging
import os
import time
from datetime import datetime, timedelta
from pathlib import Path
from urllib.parse import urljoin

import pandas as pd
import requests
import yaml
from bs4 import BeautifulSoup

# Configure module-wide logging: INFO level, timestamped records tagged with the logger name.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class CompanyInfoCollector:
    """Collects company information: basic profile, website content, and job postings.

    Settings come from a YAML config file; results are written as a dated JSON
    file under the configured output directory.
    """

    def __init__(self, config_path='config/config.yaml'):
        """
        Initialize the collector.

        Args:
            config_path: Path to the YAML configuration file.
        """
        self.config_path = config_path
        self.config = self._load_config()
        self.company_info_config = self.config.get('company_info', {})
        # Browser-like User-Agent so company sites serve their normal HTML pages.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
        }

        # Make sure the output directory exists before any collection runs.
        self.output_path = self.config.get('system', {}).get('data_storage', {}).get('company_info_path', 'data/company_info/')
        os.makedirs(self.output_path, exist_ok=True)

        logger.info("公司信息收集器初始化完成")

    def _load_config(self):
        """
        Load the YAML configuration file.

        Returns:
            dict: Parsed configuration, or {} when the file is missing,
            unreadable, or empty.
        """
        try:
            with open(self.config_path, 'r', encoding='utf-8') as f:
                # safe_load returns None for an empty file; normalize to {}
                # so later .get() chains never crash on None.
                config = yaml.safe_load(f) or {}
            logger.info(f"成功加载配置文件: {self.config_path}")
            return config
        except Exception as e:
            logger.error(f"加载配置文件失败: {e}")
            return {}

    def collect_from_stock_list(self, stock_list=None):
        """
        Collect company information for a list of stock codes.

        Args:
            stock_list: Stock codes to process; when None the list is resolved
                from config or the latest analysis-results file.

        Returns:
            bool: True when collection and saving succeeded.
        """
        try:
            if stock_list is None:
                stock_list = self._get_stock_list_from_config()

            if not stock_list:
                logger.warning("股票列表为空，无法收集公司信息")
                return False

            logger.info(f"准备收集 {len(stock_list)} 支股票的公司信息")

            all_company_info = {}

            for stock_code in stock_list:
                logger.info(f"开始收集股票 {stock_code} 的公司信息")

                basic_info = self._get_company_basic_info(stock_code)
                website_info = self._collect_website_info(stock_code, basic_info.get('website', ''))
                recruitment_info = self._collect_recruitment_info(stock_code, basic_info.get('name', ''))

                all_company_info[stock_code] = {
                    'basic_info': basic_info,
                    'website_info': website_info,
                    'recruitment_info': recruitment_info,
                    'collected_at': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                }

                # Throttle so the target sites are not hammered.
                time.sleep(2)

            self._save_company_info(all_company_info)

            logger.info(f"成功收集 {len(all_company_info)} 支股票的公司信息")
            return True

        except Exception as e:
            logger.error(f"收集公司信息失败: {e}")
            return False

    def _get_stock_list_from_config(self):
        """
        Resolve the stock list from config, falling back to the latest
        analysis-results file (today's, then yesterday's).

        Returns:
            list: Stock codes, truncated to the configured per-run maximum.
        """
        try:
            stock_list = self.company_info_config.get('stock_list', [])

            if not stock_list:
                results_path = self.config.get('system', {}).get('data_storage', {}).get('results_path', 'data/results/')
                date = datetime.now().strftime('%Y%m%d')

                analysis_file = os.path.join(results_path, f'analysis_results_{date}.json')
                if not os.path.exists(analysis_file):
                    # BUG FIX: the original called datetime.timedelta(...), which
                    # does not exist on the datetime class and always raised
                    # AttributeError here; use timedelta directly.
                    date = (datetime.now() - timedelta(days=1)).strftime('%Y%m%d')
                    analysis_file = os.path.join(results_path, f'analysis_results_{date}.json')

                if os.path.exists(analysis_file):
                    with open(analysis_file, 'r', encoding='utf-8') as f:
                        analysis_data = json.load(f)
                    stock_list = list(analysis_data.keys())

            # Cap the batch size to avoid scraping too many sites in one run.
            max_stocks = self.company_info_config.get('max_stocks_per_collection', 20)
            if len(stock_list) > max_stocks:
                stock_list = stock_list[:max_stocks]
                logger.info(f"限制股票数量为 {max_stocks}")

            return stock_list

        except Exception as e:
            logger.error(f"获取股票列表失败: {e}")
            return []

    def _get_company_basic_info(self, stock_code):
        """
        Build the basic company profile, enriched via Tushare when an API
        token is configured.

        Args:
            stock_code: Stock code.

        Returns:
            dict: Basic company information (placeholders when no data source
            is available).
        """
        try:
            # Placeholder profile; enriched below if Tushare is configured.
            company_info = {
                'code': stock_code,
                'name': self._get_company_name(stock_code),
                'website': self._get_company_website(stock_code),
                'industry': '',
                'established_date': '',
                'location': '',
                'description': ''
            }

            if self.config.get('stock_market', {}).get('api_token'):
                try:
                    # Imported lazily so the module works without tushare installed.
                    import tushare as ts
                    ts.set_token(self.config['stock_market']['api_token'])
                    pro = ts.pro_api()

                    df = pro.stock_company(ts_code=self._format_stock_code(stock_code))
                    if not df.empty:
                        row = df.iloc[0]
                        company_info.update({
                            'name': row.get('name', company_info['name']),
                            'website': row.get('website', company_info['website']),
                            'industry': row.get('industry', ''),
                            'established_date': row.get('setup_date', ''),
                            'location': row.get('area', ''),
                            'description': row.get('main_business', '')
                        })
                except Exception as e:
                    logger.warning(f"通过Tushare获取公司信息失败: {e}")

            return company_info

        except Exception as e:
            logger.error(f"获取公司基本信息失败: {e}")
            return {
                'code': stock_code,
                'name': '',
                'website': ''
            }

    def _format_stock_code(self, stock_code):
        """
        Convert a bare stock code to Tushare's exchange-suffixed format.

        Args:
            stock_code: Raw stock code, with or without an exchange suffix.

        Returns:
            str: Code such as 000001.SZ / 600000.SH / 830001.BJ; codes that
            match no known prefix are returned unchanged.
        """
        # Already suffixed (e.g. 000001.SZ) — pass through unchanged.
        if '.' in stock_code:
            return stock_code

        # Shenzhen (000/001/002/003 main board, 300/301 ChiNext)
        if stock_code.startswith(('000', '001', '002', '003', '300', '301')):
            return f"{stock_code}.SZ"
        # Shanghai STAR Market (688)
        elif stock_code.startswith('688'):
            return f"{stock_code}.SH"
        # Shanghai main board (600/601/603/605)
        elif stock_code.startswith(('600', '601', '603', '605')):
            return f"{stock_code}.SH"
        # Beijing Stock Exchange (8xx)
        elif stock_code.startswith('8'):
            return f"{stock_code}.BJ"
        # Hong Kong (short codes starting with 00)
        elif stock_code.startswith('00') and len(stock_code) <= 5:
            return f"{stock_code}.HK"
        else:
            return stock_code

    def _get_company_name(self, stock_code):
        """
        Return the company name for *stock_code*.

        NOTE: placeholder implementation — returns "公司<code>" until a real
        data source (local file or API) is wired in.
        """
        return f"公司{stock_code}"

    def _get_company_website(self, stock_code):
        """
        Return the company website for *stock_code*.

        NOTE: placeholder implementation — always returns "" until a real
        data source (local file or API) is wired in.
        """
        return ""

    def _fetch_soup(self, url):
        """
        Fetch *url* and parse the response into a BeautifulSoup document.

        Propagates request/HTTP errors so each caller can log its own,
        section-specific warning.
        """
        response = requests.get(url, headers=self.headers, timeout=10)
        response.raise_for_status()
        # Use the detected encoding so Chinese pages decode correctly.
        response.encoding = response.apparent_encoding
        return BeautifulSoup(response.text, 'html.parser')

    def _find_section_links(self, soup, base_url, text_keywords, href_keyword):
        """
        Return absolute URLs of anchors that look like a given site section.

        A link matches when its visible text contains any of *text_keywords*
        or its href contains *href_keyword* (case-insensitive).
        """
        urls = []
        for link in soup.find_all('a'):
            href = link.get('href', '')
            text = link.get_text().strip()
            if any(kw in text for kw in text_keywords) or href_keyword in href.lower():
                urls.append(urljoin(base_url, href))
        return urls

    def _collect_website_info(self, stock_code, website_url):
        """
        Scrape a company's website for about/news/product/technology content.

        Args:
            stock_code: Stock code (used only for logging).
            website_url: Homepage URL; may be empty.

        Returns:
            dict: Scraped content; contains an 'error' key on failure.
        """
        if not website_url:
            logger.warning(f"股票 {stock_code} 没有网站地址")
            return {
                'url': '',
                'about_content': '',
                'news': [],
                'products': [],
                'technologies': [],
                'error': '没有网站地址'
            }

        try:
            logger.info(f"开始抓取公司网站: {website_url}")

            website_info = {
                'url': website_url,
                'about_content': '',
                'news': [],
                'products': [],
                'technologies': [],
                'last_updated': ''
            }

            soup = self._fetch_soup(website_url)

            # Best-effort extraction of a "last updated / published" marker.
            try:
                update_texts = soup.find_all(string=lambda s: '更新' in s or '发布' in s)
                if update_texts:
                    website_info['last_updated'] = update_texts[0].strip()
            except Exception as e:
                logger.warning(f"提取网站更新时间失败: {e}")

            # "About us" page: grab the main content block if identifiable.
            about_urls = self._find_section_links(soup, website_url, ('关于', '公司介绍'), 'about')
            if about_urls:
                try:
                    about_soup = self._fetch_soup(about_urls[0])
                    main_content = about_soup.find('div', {'class': ['main', 'content', 'article']})
                    if main_content:
                        website_info['about_content'] = main_content.get_text().strip()
                    else:
                        # No obvious content container: take the whole page text, capped.
                        website_info['about_content'] = about_soup.get_text().strip()[:1000]
                except Exception as e:
                    logger.warning(f"抓取关于我们页面失败: {e}")

            # News page: first few items with title and date.
            news_urls = self._find_section_links(soup, website_url, ('新闻', '动态'), 'news')
            if news_urls:
                try:
                    news_soup = self._fetch_soup(news_urls[0])
                    news_items = news_soup.find_all('div', {'class': ['news-item', 'list-item']}) or \
                                 news_soup.find_all('li', {'class': ['news-item', 'list-item']})

                    for item in news_items[:5]:  # cap the number of items
                        title_elem = item.find(['h3', 'h4', 'a'])
                        date_elem = item.find(class_=['date', 'time'])

                        title = title_elem.get_text().strip() if title_elem else ''
                        date = date_elem.get_text().strip() if date_elem else ''

                        if title:
                            website_info['news'].append({
                                'title': title,
                                'date': date
                            })
                except Exception as e:
                    logger.warning(f"抓取新闻页面失败: {e}")

            # Products page: first few product titles.
            product_urls = self._find_section_links(soup, website_url, ('产品', '解决方案'), 'product')
            if product_urls:
                try:
                    product_soup = self._fetch_soup(product_urls[0])
                    product_items = product_soup.find_all('div', {'class': ['product-item', 'list-item']}) or \
                                    product_soup.find_all('li', {'class': ['product-item', 'list-item']}) or \
                                    product_soup.find_all(['h3', 'h4'], {'class': ['product-title', 'title']})

                    for item in product_items[:5]:  # cap the number of items
                        title_elem = item.find(['h3', 'h4', 'a']) or item
                        title = title_elem.get_text().strip()

                        if title:
                            website_info['products'].append(title)
                except Exception as e:
                    logger.warning(f"抓取产品页面失败: {e}")

            # Tag the homepage with any technology keywords it mentions.
            tech_keywords = ['技术', '研发', '创新', 'AI', '人工智能', '区块链', '云计算', '大数据',
                             '芯片', '算法', '自动化', '智能制造', '物联网', '5G', '数字化']

            text_content = soup.get_text().lower()
            website_info['technologies'] = [kw for kw in tech_keywords if kw.lower() in text_content]

            return website_info

        except Exception as e:
            logger.error(f"收集公司网站信息失败: {e}")
            return {
                'url': website_url,
                'about_content': '',
                'news': [],
                'products': [],
                'technologies': [],
                'error': str(e)
            }

    def _collect_recruitment_info(self, stock_code, company_name):
        """
        Collect recruitment information for a company.

        NOTE: currently generates simulated job data; real job-board APIs or
        scrapers can be plugged in where indicated.

        Args:
            stock_code: Stock code (used only for logging).
            company_name: Company name to search for; may be empty.

        Returns:
            dict: Recruitment info; contains an 'error' key on failure.
        """
        if not company_name:
            logger.warning(f"股票 {stock_code} 没有公司名称")
            return {
                'job_count': 0,
                'positions': [],
                'keywords': [],
                'error': '没有公司名称'
            }

        try:
            logger.info(f"开始收集招聘信息: {company_name}")

            recruitment_info = {
                'job_count': 0,
                'positions': [],
                'keywords': [],
                'source': '',
                'collected_at': datetime.now().strftime('%Y-%m-%d')
            }

            # Simulated data stands in for real job-board scraping/API calls.
            try:
                recruitment_info['source'] = '模拟数据'
                recruitment_info['job_count'] = 10

                tech_positions = ['算法工程师', '软件开发工程师', '数据分析师', '研发工程师',
                                 '产品经理', '运维工程师', '测试工程师', 'AI研究员']

                business_positions = ['市场经理', '销售经理', '财务分析师', '人力资源专员',
                                    '运营专员', '客户经理', '商务拓展']

                # Pick a random mix of technical and business positions.
                import random
                positions_count = random.randint(3, 8)
                tech_count = random.randint(1, positions_count)
                business_count = positions_count - tech_count

                selected_tech = random.sample(tech_positions, min(tech_count, len(tech_positions)))
                selected_business = random.sample(business_positions, min(business_count, len(business_positions)))

                for position in selected_tech:
                    recruitment_info['positions'].append({
                        'title': position,
                        'department': '技术部门',
                        'location': '总部',
                        'education': '本科及以上',
                        'experience': '3-5年'
                    })
                    recruitment_info['keywords'].append('技术研发')

                for position in selected_business:
                    recruitment_info['positions'].append({
                        'title': position,
                        'department': '业务部门',
                        'location': '全国',
                        'education': '本科',
                        'experience': '1-3年'
                    })
                    recruitment_info['keywords'].append('业务拓展')

                # Deduplicate keywords.
                recruitment_info['keywords'] = list(set(recruitment_info['keywords']))

            except Exception as e:
                logger.warning(f"模拟招聘数据生成失败: {e}")

            # Real job-board API calls (Zhilian, 51job, ...) would go here.

            return recruitment_info

        except Exception as e:
            logger.error(f"收集公司招聘信息失败: {e}")
            return {
                'job_count': 0,
                'positions': [],
                'keywords': [],
                'error': str(e)
            }

    def _save_company_info(self, company_info):
        """
        Save collected company info to a dated JSON file.

        Args:
            company_info: Mapping of stock code -> collected company info.

        Returns:
            bool: True when the file was written successfully.
        """
        try:
            date = datetime.now().strftime('%Y%m%d')
            output_file = os.path.join(self.output_path, f'company_info_{date}.json')

            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump(company_info, f, ensure_ascii=False, indent=2)

            logger.info(f"成功保存公司信息到: {output_file}")
            return True

        except Exception as e:
            logger.error(f"保存公司信息失败: {e}")
            return False

def main():
    """Entry point: run one full company-info collection pass."""
    CompanyInfoCollector().collect_from_stock_list()


if __name__ == '__main__':
    main()