# -*- coding: utf-8 -*-

"""
从Elasticsearch中读取商品信息并爬取评论
"""

from elasticsearch import Elasticsearch
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import re
from datetime import datetime
from crawProj.distributed_weibo_spider.items import AmazonReviewItem
import json


class AmazonReviewCrawler:
    """
    Read product records from Elasticsearch and crawl their Amazon reviews.

    Workflow: fetch product documents from the ``amazon_products`` index,
    visit each product's review page with a headless Chrome WebDriver,
    scrape the visible reviews, then persist the results to the
    ``amazon_reviews`` index and/or a local JSON file.
    """

    def __init__(self, es_host, es_username, es_password):
        """
        Create the Elasticsearch client.

        The WebDriver is NOT started here; ``init_driver()`` is called
        lazily (by ``crawl_reviews_from_es_products``) so that instances
        stay cheap until crawling actually begins.

        :param es_host: Elasticsearch URL, e.g. ``http://host:9200``.
        :param es_username: basic-auth user name.
        :param es_password: basic-auth password.
        """
        # NOTE(review): verify_certs=False disables TLS certificate checks —
        # acceptable only for a trusted internal cluster.
        self.es = Elasticsearch(
            hosts=[es_host],
            http_auth=(es_username, es_password),
            verify_certs=False
        )

        # WebDriver state; populated by init_driver(), released by close_driver().
        self.driver = None
        self.wait = None

    def init_driver(self):
        """
        Start a headless Chrome WebDriver with basic anti-bot-detection flags.
        """
        chrome_options = Options()
        chrome_options.add_argument("--headless")  # run without a visible window
        chrome_options.add_argument("--no-sandbox")
        chrome_options.add_argument("--disable-dev-shm-usage")
        chrome_options.add_argument("--disable-gpu")
        chrome_options.add_argument("--window-size=1920,1080")
        chrome_options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
        # Reduce the automation fingerprint exposed to the site.
        chrome_options.add_argument("--disable-blink-features=AutomationControlled")
        chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
        chrome_options.add_experimental_option('useAutomationExtension', False)

        # Explicit chromedriver path (standard Linux install location).
        service = Service("/usr/bin/chromedriver")

        self.driver = webdriver.Chrome(service=service, options=chrome_options)
        self.wait = WebDriverWait(self.driver, 10)

        # Hide navigator.webdriver, which many bot detectors inspect.
        self.driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")

    def close_driver(self):
        """Quit the WebDriver if one was started."""
        if self.driver:
            self.driver.quit()

    def get_products_from_es(self, size=10):
        """
        Fetch up to ``size`` product documents from the ``amazon_products`` index.

        :param size: maximum number of documents to return.
        :return: list of ``_source`` dicts; empty list on any error.
        """
        try:
            response = self.es.search(
                index="amazon_products",
                body={
                    "size": size,
                    "query": {
                        "match_all": {}
                    }
                }
            )

            products = [hit['_source'] for hit in response['hits']['hits']]

            print(f"从Elasticsearch中获取到 {len(products)} 个商品")
            return products

        except Exception as e:
            print(f"从Elasticsearch获取商品信息时出错: {e}")
            return []

    def get_product_reviews(self, asin, product_url):
        """
        Scrape the reviews visible on the first review page of one product.

        Each field is extracted with a primary ``data-hook`` CSS selector and
        one or more legacy-markup fallbacks, because Amazon serves several
        page variants.

        :param asin: Amazon product identifier used to build the review URL.
        :param product_url: currently unused; kept for interface compatibility.
        :return: list of AmazonReviewItem (empty on captcha or error).
        """
        reviews = []

        try:
            # The dedicated review page is addressable directly by ASIN.
            reviews_url = f"https://www.amazon.com/product-reviews/{asin}?reviewerType=all_reviews"
            print(f"正在访问评论页面: {reviews_url}")
            self.driver.get(reviews_url)
            time.sleep(5)  # crude wait for the page to finish loading

            # Bail out early if Amazon served a captcha challenge instead.
            if self.check_for_captcha():
                print("检测到验证码，跳过此商品评论获取")
                return reviews

            # Primary selector first, legacy class-based markup as fallback.
            review_elements = self.driver.find_elements(By.CSS_SELECTOR, "[data-hook='review']")
            if not review_elements:
                review_elements = self.driver.find_elements(By.CSS_SELECTOR, ".review")

            print(f"找到 {len(review_elements)} 条评论")

            for review_element in review_elements:
                try:
                    review = AmazonReviewItem()

                    # The review ID is embedded in the element's id attribute
                    # as "customer_review-<ID>".
                    review_id = review_element.get_attribute("id")
                    if review_id:
                        id_match = re.search(r'customer_review-([A-Z\d]+)', review_id)
                        if id_match:
                            review['id'] = id_match.group(1)

                    # Title. .text can be empty for hidden elements, so
                    # textContent is consulted as a secondary source.
                    try:
                        title_element = review_element.find_element(By.CSS_SELECTOR, "[data-hook='review-title'] span")
                        if not (title_element.text.strip() or title_element.get_attribute("textContent")):
                            title_element = review_element.find_element(By.CSS_SELECTOR, ".review-title span")
                        review['title'] = (title_element.text.strip() or title_element.get_attribute("textContent") or "").strip()
                    except NoSuchElementException:
                        review['title'] = ""

                    # Review body text.
                    try:
                        content_element = review_element.find_element(By.CSS_SELECTOR, "[data-hook='review-body']")
                        review['content'] = (content_element.text.strip() or content_element.get_attribute("textContent") or "").strip()
                    except NoSuchElementException:
                        try:
                            content_elements = review_element.find_elements(By.CSS_SELECTOR, ".review-text-content span")
                            if not content_elements:
                                content_elements = review_element.find_elements(By.CSS_SELECTOR, ".review-text span")
                            review['content'] = ' '.join([elem.text or elem.get_attribute("textContent") or "" for elem in content_elements]).strip()
                        except NoSuchElementException:
                            review['content'] = ""

                    # Star rating, parsed from text like "4.0 out of 5 stars".
                    try:
                        rating_element = review_element.find_element(By.CSS_SELECTOR, "[data-hook='review-star-rating']")
                        rating_text = rating_element.get_attribute("textContent") or rating_element.text
                        rating_match = re.search(r'(\d+\.?\d*) out of 5', rating_text)
                        if rating_match:
                            review['rating'] = float(rating_match.group(1))
                    except NoSuchElementException:
                        try:
                            rating_element = review_element.find_element(By.CSS_SELECTOR, ".review-rating span")
                            rating_text = rating_element.get_attribute("textContent") or rating_element.text
                            rating_match = re.search(r'(\d+\.?\d*) out of 5', rating_text)
                            if rating_match:
                                review['rating'] = float(rating_match.group(1))
                        except NoSuchElementException:
                            review['rating'] = 0.0

                    # Reviewer display name.
                    try:
                        user_element = review_element.find_element(By.CSS_SELECTOR, ".a-profile-name")
                        review['user'] = (user_element.text.strip() or user_element.get_attribute("textContent") or "").strip()
                    except NoSuchElementException:
                        review['user'] = "Anonymous"

                    # Review date (kept as the raw display string).
                    try:
                        date_element = review_element.find_element(By.CSS_SELECTOR, "[data-hook='review-date']")
                        review['date'] = (date_element.text.strip() or date_element.get_attribute("textContent") or "").strip()
                    except NoSuchElementException:
                        try:
                            date_element = review_element.find_element(By.CSS_SELECTOR, ".review-date")
                            review['date'] = (date_element.text.strip() or date_element.get_attribute("textContent") or "").strip()
                        except NoSuchElementException:
                            review['date'] = ""

                    # "Helpful" vote count, tried across three markup variants.
                    try:
                        helpful_element = review_element.find_element(By.CSS_SELECTOR, ".cr-helpful-vote-text")
                        helpful_text = (helpful_element.text.strip() or helpful_element.get_attribute("textContent") or "").strip()
                    except NoSuchElementException:
                        try:
                            helpful_element = review_element.find_element(By.CSS_SELECTOR, "[data-hook='helpful-vote-summary']")
                            helpful_text = (helpful_element.text.strip() or helpful_element.get_attribute("textContent") or "").strip()
                        except NoSuchElementException:
                            try:
                                helpful_element = review_element.find_element(By.CSS_SELECTOR, ".review-votes")
                                helpful_text = (helpful_element.text.strip() or helpful_element.get_attribute("textContent") or "").strip()
                            except NoSuchElementException:
                                helpful_text = ""

                    # Strip everything but digits ("12 people found this helpful" -> "12").
                    if helpful_text:
                        helpful_count = re.sub(r'[^\d]', '', helpful_text)
                        if helpful_count:
                            review['helpful_count'] = int(helpful_count)

                    review['asin'] = asin
                    review['crawl_time'] = datetime.now()

                    reviews.append(review)

                except Exception as e:
                    # One malformed review must not abort the whole page.
                    print(f"处理评论时出错: {e}")
                    continue

        except Exception as e:
            print(f"获取商品评论时出错: {e}")

        return reviews

    def check_for_captcha(self):
        """
        Return True when the current page appears to be a captcha challenge.
        """
        try:
            # Amazon's captcha page contains an input with this id;
            # find_element raises when it is absent, so reaching the next
            # line means the captcha form is present.
            self.driver.find_element(By.ID, "captchacharacters")
            return True
        except NoSuchElementException:
            pass

        # Fall back to inspecting the page title for captcha keywords.
        try:
            title = self.driver.title
            if "Captcha" in title or "验证" in title:
                return True
        except Exception:
            # Reading the title can fail on a crashed/dead session; treat
            # that as "no captcha detected" rather than aborting the crawl.
            pass

        return False

    def crawl_reviews_from_es_products(self, max_products=10):
        """
        Crawl reviews for products stored in Elasticsearch.

        Starts the WebDriver, iterates the products, and always shuts the
        driver down again (``finally``) even when crawling raises.

        :param max_products: maximum number of products to process.
        :return: flat list of all scraped review items.
        """
        products = self.get_products_from_es(max_products)
        if not products:
            print("未能从Elasticsearch中获取到商品信息")
            return []

        self.init_driver()

        all_reviews = []

        try:
            for i, product in enumerate(products):
                asin = product.get('asin')
                url = product.get('url')

                # Without an ASIN the review URL cannot be built.
                if not asin:
                    print(f"商品 {i+1} 缺少ASIN信息，跳过")
                    continue

                print(f"正在获取第 {i+1} 个商品的评论，ASIN: {asin}")
                reviews = self.get_product_reviews(asin, url)
                all_reviews.extend(reviews)
                print(f"获取到 {len(reviews)} 条评论")

                # Throttle between products to avoid anti-bot triggers.
                time.sleep(3)

        finally:
            self.close_driver()

        print(f"总共获取到 {len(all_reviews)} 条评论")
        return all_reviews

    def save_reviews_to_es(self, reviews):
        """
        Index the review items into the ``amazon_reviews`` index.

        datetime values are serialized to ISO-8601 strings first.

        :return: True on success, False if any indexing call failed.
        """
        try:
            for review in reviews:
                # Items are mapping-like; dict() yields a serializable copy.
                review_dict = dict(review)

                # Elasticsearch JSON cannot carry datetime objects directly.
                for key, value in review_dict.items():
                    if isinstance(value, datetime):
                        review_dict[key] = value.isoformat()

                self.es.index(
                    index='amazon_reviews',
                    document=review_dict
                )

            print(f"成功将 {len(reviews)} 条评论保存到Elasticsearch")
            return True

        except Exception as e:
            print(f"保存评论到Elasticsearch时出错: {e}")
            return False

    def save_reviews_to_json(self, reviews, filename=None):
        """
        Write the review items to a UTF-8 JSON file.

        :param reviews: iterable of mapping-like review items.
        :param filename: output path; defaults to a timestamped name.
        :return: True on success, False on any error.
        """
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f'amazon_reviews_from_es_{timestamp}.json'

        try:
            # Convert items to plain dicts with JSON-safe values.
            reviews_data = []
            for review in reviews:
                review_dict = dict(review)
                # crawl_time is a datetime; serialize it to ISO-8601.
                if 'crawl_time' in review_dict and hasattr(review_dict['crawl_time'], 'isoformat'):
                    review_dict['crawl_time'] = review_dict['crawl_time'].isoformat()
                reviews_data.append(review_dict)

            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(reviews_data, f, ensure_ascii=False, indent=2)

            # BUGFIX: the message previously did not include the actual path.
            print(f"评论数据已保存到 {filename}")
            return True

        except Exception as e:
            print(f"保存评论到JSON文件时出错: {e}")
            return False


def main():
    """
    Entry point: read products from Elasticsearch, crawl their reviews,
    and persist the results to Elasticsearch and a JSON file.
    """
    import os

    print("开始从Elasticsearch中读取商品信息并爬取评论...")

    # SECURITY(review): credentials were hard-coded in source. They are kept
    # only as fallback defaults for backward compatibility — prefer setting
    # ES_HOST / ES_USERNAME / ES_PASSWORD in the environment and removing
    # the literal values (and rotating this password).
    es_host = os.environ.get('ES_HOST', 'http://1.94.107.109:9200')
    es_username = os.environ.get('ES_USERNAME', 'elastic')
    es_password = os.environ.get('ES_PASSWORD', '07122201hm')

    crawler = AmazonReviewCrawler(es_host, es_username, es_password)

    # Crawl reviews for products already stored in Elasticsearch.
    reviews = crawler.crawl_reviews_from_es_products(max_products=5)

    if reviews:
        # Persist to both sinks.
        crawler.save_reviews_to_es(reviews)
        crawler.save_reviews_to_json(reviews)

        # Print a short preview of the first few reviews.
        print("\n前5条评论:")
        for i, review in enumerate(reviews[:5]):
            print(f"{i+1}. {review.get('title', 'N/A')}")
            print(f"   评分: {review.get('rating', 'N/A')}")
            print(f"   用户: {review.get('user', 'N/A')}")
            print(f"   ASIN: {review.get('asin', 'N/A')}")
            content = review.get('content', 'N/A')
            # Truncate long review bodies for readable console output.
            if len(content) > 100:
                content = content[:100] + "..."
            print(f"   内容: {content}")
            print()
    else:
        print("未能获取到任何评论信息")


if __name__ == "__main__":
    main()