import requests
from bs4 import BeautifulSoup
import pandas as pd
from datetime import datetime
import time
import browser_cookie3
import os
import platform
import undetected_chromedriver as uc  # 替换原来的 selenium webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from urllib.parse import urlencode
import json
from sqlalchemy.exc import IntegrityError
from models.shopify_post import Session, ShopifyPost
from selenium.common.exceptions import TimeoutException
import ssl

class ShopifyCrawler:
    """Crawler for the Shopify community forum.

    Drives a real Chrome instance through undetected-chromedriver (to get past
    bot detection), loads login cookies from a local browser, parses the
    discussion listings with BeautifulSoup, and persists each post through the
    SQLAlchemy ``ShopifyPost`` model.
    """

    def __init__(self, tid):
        """Prepare the listing endpoint, request headers and both sessions.

        Args:
            tid: forum label/board id placed into the listing query string.
        """
        self.base_url = "https://community.shopify.com/plugins/custom/shopify/shopify/custom.home_get_recent_discussions"
        self.params = {
            'tid': tid,
            'limit': '20',
            'offset': '60',  # recalculated per page in fetch_page()
            'tab_value': '',
            'board_filter': '',
            'metrics_filter': 'post_time',
            'core_node': 'shopify-discussion',
            'core_type': 'board',
            'page_name': 'ForumsFilteredByLabelPage',
            'root_category_id': 'en',
            'label_name': 'Troubleshooting'
        }
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Referer': 'https://community.shopify.com/'
        }
        self.session = requests.Session()  # carries the forum login cookies
        self.db_session = Session()        # SQLAlchemy session for persistence

    def load_browser_cookies(self, browser='chrome'):
        """Load login cookies from a local browser into ``self.session``.

        For Chrome it tries chrome-cookies-secure first, then browser_cookie3,
        and finally falls back to a hard-coded cookie string.  Other browsers
        go straight through browser_cookie3.

        Returns:
            True when cookies were loaded, False on failure.
        """
        try:
            print(f"尝试从 {browser} 加载 cookies...")

            # Chrome keeps its cookie database locked while it is running.
            if browser == 'chrome':
                print("提示：请确保 Chrome 浏览器已完全关闭")

                try:
                    # First choice: chrome-cookies-secure.
                    import chrome_cookies
                    cookies = chrome_cookies.load_cookies('community.shopify.com')
                    for cookie in cookies:
                        self.session.cookies.set(
                            cookie['name'],
                            cookie['value'],
                            domain='community.shopify.com'
                        )
                    print("成功使用 chrome-cookies-secure 加载 cookies")
                    return True
                except Exception as chrome_err:
                    print(f"使用 chrome-cookies-secure 失败: {chrome_err}")

                    try:
                        # Fall back to browser_cookie3.
                        cj = browser_cookie3.chrome(
                            domain_name='community.shopify.com',
                            cookie_file=None,  # auto-detect cookie file location
                            key_file=None,     # auto-detect key file location
                        )
                        for cookie in cj:
                            self.session.cookies.set_cookie(cookie)
                        print("成功使用 browser_cookie3 加载 cookies")
                        return True
                    except Exception as e:
                        print(f"使用 browser_cookie3 失败: {e}")

                        # SECURITY NOTE(review): last resort is a hard-coded
                        # session cookie string captured from a logged-in
                        # browser.  It embeds a real, expiring session token
                        # and should be moved to a config file or environment
                        # variable rather than committed to source.
                        cookies_str = '''
            _y=fc627d1b-8987-43f4-9101-ab6c6a24ece2; _gcl_aw=GCL.1737097689.Cj0KCQiA-aK8BhCDARIsAL_-H9kNc9F5HMJchHwHOJRzDQNEiOPq_-EzMBAndtS-vz6hVmKEWtOPqfAaAtU5EALw_wcB; _gcl_gs=2.1.k1$i1737097685; _ga=GA1.1.96543868.1737097696; FPID=FPID2.2.sFv99ErKK7DzQzypzwWavDO77uMjChsR1zo3va3GCDU%3D.1737097696; FPAU=1.2.1389800299.1737097697; master_device_id=a8ddd504-50ad-4dfc-a949-f76158499186; logged_in=true; is_shopify_merchant=1; _gcl_au=1.1.1229995962.1737098591; _shopify_y=fc627d1b-8987-43f4-9101-ab6c6a24ece2; _merchant_essential=%3AAZSBaZloAAEASq6_eNEJ9v0ozG-ihlZBbePoGa90x3uE6AY4L9YJtF3Fn3I1wVAVF5_x%3A; _merchant_marketing=%3AAZSCIVSQAAEAxNlEaUT0gvNQWr1_SZN85whOn8-0s82vsVVwgdpI7WNwln-I%3A; mto_pvs=7; LiSESSIONID=949C56CBAACABA46FCBEDA4A4E43B6B1; lithiumSSO:covpk78932=~2EtNl1W3KCJaGwqzV~AxJEDBBSmMGs6lQ8t9SlVloUgQUIHnS1a8eJ73URMgpzhwjpoiWeYKkPTwsUQVBR1zUkqaBRWQ3b70tqQjQtMuDNP2Aq5QzkZgtUMp4dMl8X--XEqmIOVjUo2tLc1q-nF2varaPlEH31KpeXfae4LeE5AqjSQCjoC4RE7PHc0UrB8ohxOfZAx7DWRY1JGqTKhWy5HSaXbws1UdHks8CPq6-jYhcXI8fZD-0zf7q7BROFHizek0XVkuqDmbvbD42FQtu6hDcg9ElfKimR8SFWbQ80AkKZlQY8pHGwuluwqSGdWbJvmeqYPMQLbe4Bbh8CwD-qHON53p_CxVrl26JRBsux0rlU9k6NGiH3hOYvjbS8uTJ8aaV3snAxr_yc9C96IYvWCivMYw5kU29y99mZpVDNNBxtLVc3LhVnqS3rcnYOEAOZHGTLrsxWCT_l1WetKYmPhIbnbi5k6xxrLgpXQXNqg7hDhOKdb-C_LJH8tYc.; !lithiumSSO=~2EtNl1W3KCJaGwqzV~AxJEDBBSmMGs6lQ8t9SlVloUgQUIHnS1a8eJ73URMgpzhwjpoiWeYKkPTwsUQVBR1zUkqaBRWQ3b70tqQjQtMuDNP2Aq5QzkZgtUMp4dMl8X--XEqmIOVjUo2tLc1q-nF2varaPlEH31KpeXfae4LeE5AqjSQCjoC4RE7PHc0UrB8ohxOfZAx7DWRY1JGqTKhWy5HSaXbws1UdHks8CPq6-jYhcXI8fZD-0zf7q7BROFHizek0XVkuqDmbvbD42FQtu6hDcg9ElfKimR8SFWbQ80AkKZlQY8pHGwuluwqSGdWbJvmeqYPMQLbe4Bbh8CwD-qHON53p_CxVrl26JRBsux0rlU9k6NGiH3hOYvjbS8uTJ8aaV3snAxr_yc9C96IYvWCivMYw5kU29y99mZpVDNNBxtLVc3LhVnqS3rcnYOEAOZHGTLrsxWCT_l1WetKYmPhIbnbi5k6xxrLgpXQXNqg7hDhOKdb-C_LJH8tYc.; ValueSurveyParticipation=1737525507590; ValueSurveyVisitorCount=~2z6nYPoviF2Fz4r71~Egrn9kdsNdMDuU1mqoKRPsIxo7Oz1onzNEM0PWuYbAA.; 
_ga_W6NECZNE63=GS1.1.1737604709.5.1.1737605428.0.0.1367651189; __cf_bm=Aq7qpNf8n10GDZpbqfwvWK2auVmakv6cn9CIdNMMjyI-1737696317-1.0.1.1-.TsOIO0S2_qXkwswKadfooBu2Gio3DUM1ogkEdlQc2bX3yup3ZXhPP.6txY52PuZHgksgLHCIE93_Y2mwH3eaA; _shopify_s=e6f2eafd-af52-44fb-868e-2a6206d89e1f; _ga_825ZH3XNS0=GS1.1.1737696324.14.1.1737696471.59.0.0; VISITOR_BEACON=~2rklFHBMImPkAkGV8~0ARClq1LBEmxIkDL3PfY1vsrULwJAguHWvd0Zq-4nZDwLclK-gsMFKPkN60i6EVccKBZS0ug0GPoQWoW-d9eSw..; AWSALB=0Y5SURH2Owba/9JL3gDOge0L94kiXl0IGmhQh06nFzaAB+M+pXWdsfk+AT8+LyFUHJTtLWPWkOAj54200DKWSBvss5ZfzpJZHCcm5pacMeGokDeouYFtWoMCZ0Wj; AWSALBCORS=0Y5SURH2Owba/9JL3gDOge0L94kiXl0IGmhQh06nFzaAB+M+pXWdsfk+AT8+LyFUHJTtLWPWkOAj54200DKWSBvss5ZfzpJZHCcm5pacMeGokDeouYFtWoMCZ0Wj; LithiumVisitor=~20RoCvQ8WERtAZoRa~mBbyHvW4nm1F0eIJwG9diY6y_9MUthMiZPcQkQmU5Dx8DbhguHQATPjzWhAaGEE0UeOwqrNXAdLW1LSmhGVEbg..
                        '''
                        # Bug fix: the old code had an unreachable print()
                        # after this return statement.
                        return self.set_cookies_manually(cookies_str)

            elif browser == 'firefox':
                cj = browser_cookie3.firefox(domain_name='community.shopify.com')
            elif browser == 'edge':
                cj = browser_cookie3.edge(domain_name='community.shopify.com')
            elif browser == 'safari':
                cj = browser_cookie3.safari(domain_name='community.shopify.com')
            else:
                raise ValueError(f"不支持的浏览器类型: {browser}")

            # Copy the harvested cookies into the requests session.
            for cookie in cj:
                self.session.cookies.set_cookie(cookie)

            print(f"已成功从 {browser} 加载 cookies")
            return True

        except Exception as e:
            print(f"加载浏览器 cookies 时出错: {e}")
            print("错误类型:", type(e).__name__)
            print("错误详情:", str(e))
            return False

    def get_chrome_cookies_path(self):
        """Return the default Chrome cookie-database path for the current OS.

        Raises:
            OSError: when run on an OS other than Linux, macOS or Windows.
        """
        system = platform.system()
        if system == 'Linux':
            return os.path.expanduser('~/.config/google-chrome/Default/Cookies')
        elif system == 'Darwin':  # macOS
            return os.path.expanduser('~/Library/Application Support/Google/Chrome/Default/Cookies')
        elif system == 'Windows':
            return os.path.expanduser('~\\AppData\\Local\\Google\\Chrome\\User Data\\Default\\Cookies')
        else:
            raise OSError(f"不支持的操作系统: {system}")

    def set_cookies_manually(self, cookies_str):
        """Parse a raw ``name=value; name=value`` cookie string into the session.

        Returns:
            True on success, False if parsing failed.
        """
        try:
            # Split on ';' and keep only fragments that look like name=value.
            for cookie in cookies_str.split(';'):
                if '=' in cookie:
                    name, value = cookie.strip().split('=', 1)
                    self.session.cookies.set(name, value, domain='community.shopify.com')
            print("已手动设置 cookies")
            return True

        except Exception as e:
            print(f"设置 cookies 时出错: {e}")
            return False

    def fetch_page(self, page_num=1):
        """Fetch one listing page with undetected-chromedriver.

        Computes the ``offset`` parameter for ``page_num``, opens the main
        domain, injects the cookies held in ``self.session`` into the browser,
        then loads the listing URL.

        Returns:
            A BeautifulSoup document of the listing, or None on failure.
        """
        driver = None
        retry_count = 3

        try:
            # Build the full listing URL for the requested page.
            params = self.params.copy()
            params['offset'] = str((page_num - 1) * int(params['limit']))
            full_url = f"{self.base_url}?{urlencode(params)}"

            print(f"正在爬取URL: {full_url}")

            # Minimal Chrome configuration; uc supplies the stealth flags.
            chrome_options = uc.ChromeOptions()
            chrome_options.add_argument('--no-sandbox')
            chrome_options.add_argument('--disable-dev-shm-usage')

            # Retry with an increasing back-off between attempts.
            for attempt in range(retry_count):
                try:
                    driver = uc.Chrome(
                        version_main=131,
                        options=chrome_options
                    )
                    driver.set_page_load_timeout(60)

                    # Visit the main domain first so cookies can be attached.
                    print("正在访问主域名...")
                    driver.get('https://community.shopify.com')
                    time.sleep(3)

                    # Copy the requests-session cookies into the browser.
                    print("正在设置 cookies...")
                    for cookie in self.session.cookies:
                        try:
                            driver.add_cookie({
                                'name': cookie.name,
                                'value': cookie.value,
                                'domain': '.shopify.com'
                            })
                        except Exception as e:
                            print(f"设置 cookie 失败: {cookie.name} - {str(e)}")
                            continue

                    time.sleep(2)

                    # Load the actual listing page.
                    print(f"正在获取页面: {full_url}")
                    driver.get(full_url)

                    # Wait for the topic list to render.
                    try:
                        wait = WebDriverWait(driver, timeout=30, poll_frequency=1)
                        wait.until(lambda d: d.find_element(By.CLASS_NAME, 'res-data'))
                    except TimeoutException:
                        print("等待页面元素超时，继续尝试...")

                    soup = BeautifulSoup(driver.page_source, 'html.parser')

                    # Verify that we actually received the topic list.
                    if not soup.find_all('div', class_='res-data'):
                        raise Exception("未找到帖子列表内容")

                    print(f"成功获取页面: {full_url}")
                    return soup

                except Exception as e:
                    print(f"第 {attempt + 1} 次尝试失败: {str(e)}")
                    # Bug fix: the old code called driver.refresh() right
                    # after driver.quit(); refreshing a dead session raised
                    # and aborted every remaining retry.  Just dispose of the
                    # driver -- the next attempt creates a fresh one.
                    if driver:
                        try:
                            driver.quit()
                        except Exception:
                            pass
                        driver = None
                    if attempt == retry_count - 1:
                        raise  # preserve the original traceback
                    time.sleep(5 * (attempt + 1))

        except Exception as e:
            print(f"获取页面时出错: {str(e)}")
            return None

        finally:
            # Always release the browser, including on the success path.
            if driver:
                try:
                    driver.quit()
                except Exception as e:
                    print(f"关闭浏览器时出错: {str(e)}")

    def save_to_database(self, topic_data):
        """Persist one topic dict as a ShopifyPost row.

        Duplicate posts (IntegrityError, presumably on the URL column --
        verify against the model) are rolled back and skipped.
        """
        try:
            post = ShopifyPost(
                title=topic_data['title'],
                category=topic_data['category'],
                author=topic_data['author'],
                post_date=topic_data['date'],
                url=topic_data['url'],
                views=int(topic_data['views']),
                replies=int(topic_data['replies']),
                content=topic_data['content']
            )

            self.db_session.add(post)
            self.db_session.commit()
            print(f"成功保存帖子到数据库: {topic_data['title']}")

        except IntegrityError:
            self.db_session.rollback()
            print(f"帖子已存在: {topic_data['url']}")
        except Exception as e:
            self.db_session.rollback()
            print(f"保存到数据库时出错: {e}")

    @staticmethod
    def _anchor_text(element):
        """Return the stripped text of the first <a> inside element, or ''."""
        link = element.find('a') if element else None
        return link.text.strip() if link else ''

    def parse_topics(self, soup):
        """Parse the topic entries out of one listing page.

        For every ``div.res-data`` entry the post detail page is fetched as
        well; each topic is saved to the database and collected into the
        returned list of dicts.
        """
        topics = []
        topic_elements = soup.find_all('div', class_='res-data')

        for topic in topic_elements:
            try:
                title_element = topic.find('div', class_='res-subject')
                author_element = topic.find('div', class_='res-login')
                date_element = topic.find('div', class_='res-time')
                metrics = topic.find('div', class_='res-metrics')
                category = topic.find('div', class_='res-category')

                # Without a title link there is no URL to crawl -- skip the
                # entry explicitly (previously this surfaced as an
                # AttributeError handled by the except below).
                title_link = title_element.find('a') if title_element else None
                if not title_link:
                    continue
                post_url = 'https://community.shopify.com' + title_link['href']

                # Fetch the full post body.
                post_content = self.fetch_post_content(post_url)
                time.sleep(1)  # delay so we do not hammer the site

                # Guard optional sub-elements instead of letting a missing
                # one abort the whole entry.
                views_element = metrics.find('div', class_='res-views') if metrics else None
                replies_element = metrics.find('div', class_='res-replies') if metrics else None

                topic_data = {
                    'title': title_link.text.strip(),
                    'category': self._anchor_text(category),
                    'author': self._anchor_text(author_element),
                    'date': date_element.text.strip() if date_element else '',
                    'url': post_url,
                    'views': views_element.text.strip() if views_element else '0',
                    'replies': replies_element.text.strip() if replies_element else '0',
                    'content': post_content
                }

                self.save_to_database(topic_data)
                topics.append(topic_data)
                print(f"已爬取帖子：{topic_data['title']}")

            except Exception as e:
                print(f"解析主题时出错: {e}")
                continue

        return topics

    def check_invalid_user(self, driver):
        """Return True if the page shows an 'invalid user' style message."""
        try:
            # Short wait per phrase; the page should already be loaded.
            wait = WebDriverWait(driver, 5)

            # The XPath below matches case-insensitively, so only distinct
            # lower-cased phrases are needed (the old list contained three
            # duplicates of 'invalid user', each costing a full 5s timeout).
            invalid_texts = [
                'invalid user',
                'user invalid',
                'authentication failed'
            ]

            for text in invalid_texts:
                try:
                    xpath = f"//*[contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), '{text.lower()}')]"
                    invalid_msg = wait.until(
                        EC.presence_of_element_located((By.XPATH, xpath))
                    )
                    if invalid_msg.is_displayed():
                        print(f"检测到无效用户提示: {invalid_msg.text}")
                        return True
                except TimeoutException:
                    continue

            return False

        except Exception as e:
            print(f"检查无效用户时出错: {str(e)}")
            return False

    def wait_for_content(self, driver, max_retries=3):
        """Wait for the post body to load, refreshing on 'invalid user' pages.

        Returns:
            True when #bodyDisplay is present and no invalid-user message is
            shown; False after max_retries failed attempts.
        """
        wait = WebDriverWait(driver, 20)
        retry_count = 0

        while retry_count < max_retries:
            try:
                # Wait for the post body container.
                wait.until(EC.presence_of_element_located((By.ID, 'bodyDisplay')))

                # An invalid-user page means the session hiccupped; refresh.
                if self.check_invalid_user(driver):
                    print("检测到无效用户提示，正在刷新页面...")
                    driver.refresh()
                    time.sleep(2)  # let the refresh settle
                    retry_count += 1
                    continue

                # Content is present and valid.
                return True

            except TimeoutException:
                print(f"等待超时，第 {retry_count + 1} 次重试...")
                driver.refresh()
                retry_count += 1
                time.sleep(2)

        return False

    def fetch_post_content(self, url):
        """Fetch a post's detail page and return its body text ('' on failure)."""
        driver = None
        try:
            print(f"正在获取帖子内容: {url}")

            # Headless Chrome for the detail pages.
            chrome_options = uc.ChromeOptions()
            chrome_options.add_argument('--headless')
            chrome_options.add_argument('--no-sandbox')
            chrome_options.add_argument('--disable-dev-shm-usage')
            chrome_options.add_argument('--disable-gpu')
            chrome_options.add_argument('--ignore-certificate-errors')
            chrome_options.add_argument('--ignore-ssl-errors')

            driver = uc.Chrome(
                options=chrome_options,
                version_main=131,  # pin to the installed Chrome major version
                driver_executable_path=None,
                suppress_welcome=True,
                use_subprocess=True
            )
            driver.set_page_load_timeout(30)

            driver.get(url)

            # Wait for the body, retrying through invalid-user pages.
            if not self.wait_for_content(driver):
                raise Exception("页面加载失败，超过最大重试次数")

            # Extract the post body text.
            soup = BeautifulSoup(driver.page_source, 'html.parser')
            content_element = soup.find('div', id='bodyDisplay')

            return content_element.text.strip() if content_element else ''

        except Exception as e:
            print(f"获取帖子内容时出错: {e}")
            return ''

        finally:
            if driver:
                try:
                    driver.quit()
                except Exception as e:
                    print(f"关闭浏览器时出错: {e}")

    def save_to_file(self, topics, filename='shopify_topics.json'):
        """Dump the collected topic dicts to a UTF-8 JSON file."""
        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(topics, f, ensure_ascii=False, indent=2)
            # Bug fix: the f-string previously contained no placeholder and
            # printed a literal instead of the target filename.
            print(f"数据已保存到 {filename}")
        except Exception as e:
            print(f"保存数据时出错: {e}")

    def run(self, browser='chrome', start_page=1, max_pages=5):
        """Run the crawler: load cookies, then crawl pages start_page..max_pages.

        Returns:
            The list of all topic dicts collected (empty on cookie failure).
        """
        try:
            if not self.load_browser_cookies(browser):
                print("无法加载浏览器 cookies，请确保已在浏览器中登录")
                return []

            all_topics = []
            for page in range(start_page, max_pages + 1):
                print(f"\n正在爬取第 {page} 页...")
                soup = self.fetch_page(page)
                if soup:
                    topics = self.parse_topics(soup)
                    all_topics.extend(topics)
                    time.sleep(2)  # pause between pages
                else:
                    break

            return all_topics

        finally:
            # Always release the database session.
            self.db_session.close()
