#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
1688商品搜索API服务
提供商品搜索、数据分析等接口
"""

import csv
import io
import json
import os
import re
import sys
from collections import Counter
from typing import List, Optional

import uvicorn
from fastapi import FastAPI, HTTPException, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

# Make the project root importable when this file is run as a script:
# append the parent of this file's directory to sys.path so the sibling
# module `test_multiple_keywords` resolves without package installation.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test_multiple_keywords import Multi1688Spider

app = FastAPI(title="1688商品搜索API", version="1.0.0")

# CORS middleware: origins/methods/headers are fully open ("*") so any
# browser front-end can call this API directly.
# NOTE(review): "*" origins together with allow_credentials=True is overly
# permissive for production — restrict origins before deploying publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Data models (request/response schemas)
class SearchRequest(BaseModel):
    """Request body for POST /api/search."""
    keyword: str                       # search term passed to the spider
    province: Optional[str] = ""       # optional province filter; "" = no filter
    city: Optional[str] = ""           # optional city filter; "" = no filter
    max_products: Optional[int] = 100  # upper bound on products to crawl

class Product(BaseModel):
    """One crawled 1688 listing; all values kept as raw scraped strings."""
    title: str         # product title text
    price: str         # raw price text as displayed on the page
    sales: str         # raw sales/volume text
    link: str          # product detail-page URL
    tags: List[str]    # promotional/service tag labels
    image: str         # main image URL
    supplier: str      # supplier display name
    company_name: str  # supplier company name
    return_rate: str   # repeat-purchase ("回头率") text

class SearchResponse(BaseModel):
    """Response body for POST /api/search."""
    products: List[Product]  # crawled listings
    total: int               # len(products)
    keyword: str             # echo of the request keyword
    province: str            # echo of the request province filter
    city: str                # echo of the request city filter
    analysis: dict           # summary statistics from analyze_products()

# Most recent search result; read by /api/export/csv and /api/analysis.
# Single-process, in-memory only — lost on restart.
latest_search_result: Optional[SearchResponse] = None

@app.get("/")
async def root():
    """Landing endpoint; confirms the service is up."""
    payload = {"message": "1688商品搜索API服务"}
    return payload

@app.post("/api/search", response_model=SearchResponse)
async def search_products(request: SearchRequest, background_tasks: BackgroundTasks):
    """Crawl 1688 for products matching the request and return them with analysis.

    Starts a fresh spider/WebDriver per request, crawls up to
    ``request.max_products`` listings, computes summary statistics and
    caches the response in ``latest_search_result`` for the CSV-export
    and analysis endpoints.

    Raises:
        HTTPException: 500 with the underlying error message when
            crawling or response construction fails.
    """
    global latest_search_result

    try:
        spider = Multi1688Spider()
        spider.init_driver()
        try:
            # Crawl product data for the requested keyword/region.
            products = spider.crawl_products(
                keyword=request.keyword,
                province=request.province,
                city=request.city,
                max_products=request.max_products
            )
        finally:
            # BUGFIX: always release the WebDriver, even when crawling
            # raises — previously a failed crawl leaked the browser process.
            spider.quit_driver()

        # Summary statistics over the crawled records.
        analysis = analyze_products(products)

        response = SearchResponse(
            products=[Product(**product) for product in products],
            total=len(products),
            keyword=request.keyword,
            province=request.province,
            city=request.city,
            analysis=analysis
        )

        # Cache for /api/export/csv and /api/analysis.
        latest_search_result = response

        return response

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"搜索失败: {str(e)}")

@app.get("/api/export/csv")
async def export_csv():
    """Export the most recent search result as a downloadable CSV file.

    Raises:
        HTTPException: 404 when no search has been performed yet.
    """
    global latest_search_result

    if not latest_search_result:
        raise HTTPException(status_code=404, detail="没有可导出的数据")

    buffer = io.StringIO()
    csv_writer = csv.writer(buffer)

    # Header row, then one row per cached product.
    csv_writer.writerow(["商品标题", "价格", "销量", "供应商", "公司名称", "回头率", "链接", "标签", "图片"])
    csv_writer.writerows(
        [
            item.title,
            item.price,
            item.sales,
            item.supplier,
            item.company_name,
            item.return_rate,
            item.link,
            "; ".join(item.tags),
            item.image,
        ]
        for item in latest_search_result.products
    )

    # Encode with a UTF-8 BOM ('utf-8-sig') so Excel detects the encoding.
    payload = buffer.getvalue().encode('utf-8-sig')

    return StreamingResponse(
        io.BytesIO(payload),
        media_type="text/csv",
        headers={"Content-Disposition": "attachment; filename=products.csv"}
    )

@app.get("/api/analysis")
async def get_analysis():
    """Return the analysis section of the most recent search result.

    Raises:
        HTTPException: 404 when no search has been performed yet.
    """
    global latest_search_result

    if not latest_search_result:
        raise HTTPException(status_code=404, detail="没有可分析的数据")
    return latest_search_result.analysis

def analyze_products(products: List[dict]) -> dict:
    """Compute summary statistics over a list of crawled product dicts.

    Args:
        products: Product records as produced by the spider; each may carry
            'title', 'price', 'sales', 'link', 'tags' and 'image' keys with
            raw scraped string values.

    Returns:
        A dict with the total count, price and sales min/max/avg/count,
        the ten most common tags as (tag, count) pairs, and per-field
        completeness counts plus percentage rates. Empty dict for empty input.
    """
    if not products:
        return {}

    # Price analysis: take the first decimal number in the raw price text.
    # BUGFIX: the previous digit-stripping + "/100" approach only worked for
    # prices with exactly two decimals ("¥12" became 0.12, "12.5" became 1.25,
    # and ranges like "12.50-15.00" concatenated into one huge number).
    prices = []
    for product in products:
        match = re.search(r'\d+(?:\.\d+)?', str(product.get('price', '')))
        if match:
            price_num = float(match.group())
            if price_num > 0:
                prices.append(price_num)

    # Sales analysis: concatenate all digits in the raw text
    # (e.g. "100人付款" -> 100). Records with no digits are skipped.
    sales = []
    for product in products:
        digits = ''.join(ch for ch in str(product.get('sales', '')) if ch.isdigit())
        if digits:
            sales_num = int(digits)
            if sales_num > 0:
                sales.append(sales_num)

    # Tag frequency across all products.
    tag_count = Counter(
        tag for product in products for tag in product.get('tags', [])
    )

    # How many products have a non-empty value for each field of interest.
    completeness = {
        field: sum(1 for p in products if p.get(field))
        for field in ('title', 'price', 'sales', 'link', 'tags', 'image')
    }

    return {
        'total_products': len(products),
        'price_analysis': {
            'min': min(prices) if prices else 0,
            'max': max(prices) if prices else 0,
            'avg': sum(prices) / len(prices) if prices else 0,
            'count': len(prices)
        },
        'sales_analysis': {
            'min': min(sales) if sales else 0,
            'max': max(sales) if sales else 0,
            'avg': sum(sales) / len(sales) if sales else 0,
            'count': len(sales)
        },
        # most_common(10) matches the old stable sort-by-count-descending.
        'top_tags': tag_count.most_common(10),
        'completeness': completeness,
        'completeness_rate': {
            k: f"{(v/len(products)*100):.1f}%" for k, v in completeness.items()
        }
    }

if __name__ == "__main__":
    # Run the API with uvicorn when executed directly; binds to all
    # interfaces on port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)