#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
能正确工作的日本邮政EMS运费爬虫脚本
爬取网站：https://www.post.japanpost.jp/int/charge/list/ems_all_cn.html
"""

import json
from typing import Dict, List, Optional

import pandas as pd
import requests
from bs4 import BeautifulSoup

class JapanPostEMSScraper:
    """Scraper for Japan Post EMS shipping rates.

    Downloads the Chinese-language rate page
    (https://www.post.japanpost.jp/int/charge/list/ems_all_cn.html),
    parses the weight-by-zone price table, and exports the result to
    Excel and JSON. Console output is intentionally in Chinese to match
    the target audience of the exported data.
    """

    def __init__(self):
        # Target page and browser-like headers (some hosts reject the
        # default requests User-Agent).
        self.base_url = "https://www.post.japanpost.jp/int/charge/list/ems_all_cn.html"
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7',
        }

        # Zone label -> region description. These Chinese labels are part
        # of the exported data schema, so they must not be translated.
        self.zones = {
            "第1区": "中国、韩国、台湾",
            "第2区": "亚洲（中国、韩国、台湾除外）",
            "第3区": "大洋洲、加拿大、墨西哥、中近东、欧洲",
            "第4区": "美国（包括关岛等海外领土）",
            "第5区": "中南美（墨西哥除外）、非洲"
        }

    def fetch_page(self) -> str:
        """Download the rate page.

        Returns:
            The page HTML as text, or an empty string on any request error
            (callers treat "" as "abort the run").
        """
        try:
            print("🌐 正在获取日本邮政EMS运费页面...")
            response = requests.get(self.base_url, headers=self.headers, timeout=30)
            response.raise_for_status()
            # Force UTF-8 decoding; requests may otherwise guess a wrong
            # charset from the response headers.
            response.encoding = 'utf-8'
            print(f"✅ 页面获取成功，状态码：{response.status_code}")
            return response.text
        except requests.RequestException as e:
            print(f"❌ 获取页面失败：{e}")
            return ""

    def parse_table(self, html_content: str) -> List[Dict]:
        """Parse the EMS rate table out of the page HTML.

        Args:
            html_content: Raw HTML of the rate page.

        Returns:
            A list of dicts with keys 'weight' (float, kg), 'weight_text'
            (original cell text) and 'prices' (zone label -> yen price).
            Empty list when the expected table is not found.
        """
        print("🔍 正在解析运费表格...")

        soup = BeautifulSoup(html_content, 'html.parser')

        # The rate table is the first <table> carrying the "data" class.
        ems_table = None
        for table in soup.find_all('table'):
            if table.get('class') and 'data' in table.get('class'):
                ems_table = table
                break

        if not ems_table:
            print("❌ 未找到EMS运费表格")
            return []

        print("✅ 找到EMS运费表格")

        rows = ems_table.find_all('tr')
        shipping_data = []

        print(f"📊 表格共有 {len(rows)} 行")

        for i, row in enumerate(rows):
            # Skip the two header rows before doing any cell work.
            if i < 2:
                continue

            cells = row.find_all(['td', 'th'])
            if len(cells) < 6:  # need weight column + 5 zone columns
                continue

            weight_text = cells[0].get_text(strip=True)

            # Only rows whose first cell reads like "不超过 ...kg/g" are
            # actual rate rows; others are section separators.
            if '不超过' in weight_text and ('kg' in weight_text or 'g' in weight_text):
                weight = self.parse_weight(weight_text)

                # Zone prices start in the second column, one per zone.
                prices = {}
                for j in range(5):
                    if j + 1 < len(cells):
                        price_text = cells[j + 1].get_text(strip=True)
                        price = self.parse_price(price_text)
                        if price is not None:
                            zone_name = f"第{j+1}区"
                            prices[zone_name] = price

                if prices:  # keep only rows that yielded at least one price
                    shipping_data.append({
                        'weight': weight,
                        'weight_text': weight_text,
                        'prices': prices
                    })
                    print(f"  ✅ 解析行 {i+1}: {weight_text} - {len(prices)} 个价格")

        print(f"✅ 成功解析 {len(shipping_data)} 条运费数据")
        return shipping_data

    def parse_weight(self, weight_text: str) -> float:
        """Convert a weight cell ("不超过500g", "不超过2.0kg") to kilograms.

        Returns 0.0 when the text cannot be parsed (logged, not raised).
        """
        try:
            clean_text = weight_text.replace('不超过', '').strip()

            # Check 'kg' before 'g' — 'g' is a substring of 'kg'.
            if 'kg' in clean_text:
                weight = float(clean_text.replace('kg', ''))
            elif 'g' in clean_text:
                weight = float(clean_text.replace('g', '')) / 1000
            else:
                weight = float(clean_text)

            return weight
        except ValueError:
            print(f"⚠️  无法解析重量：{weight_text}")
            return 0.0

    def parse_price(self, price_text: str) -> Optional[int]:
        """Convert a price cell (e.g. "1,400") to an int.

        Returns None for empty or unparseable cells so callers can skip
        them. (Annotation fixed: the method has always returned None for
        blank cells.)
        """
        try:
            clean_text = price_text.replace(',', '').replace(' ', '').strip()
            if clean_text:
                return int(clean_text)
            return None
        except ValueError:
            print(f"⚠️  无法解析价格：{price_text}")
            return None

    def create_dataframe(self, shipping_data: List[Dict]) -> pd.DataFrame:
        """Flatten parsed rate rows into a DataFrame (one row per weight)."""
        print("📊 正在创建数据表格...")

        df_data = []
        for item in shipping_data:
            row = {
                '重量(kg)': item['weight'],
                '重量描述': item['weight_text']
            }

            # One price column and one region-description column per zone.
            for zone_name, price in item['prices'].items():
                row[f'{zone_name}_价格(日元)'] = price
                row[f'{zone_name}_区域'] = self.zones[zone_name]

            df_data.append(row)

        df = pd.DataFrame(df_data)
        print("✅ 数据表格创建完成")
        return df

    def save_to_excel(self, df: pd.DataFrame, filename: str = "japanpost_ems_prices.xlsx"):
        """Write the DataFrame to an Excel file; errors are logged, not raised."""
        try:
            # Bug fix: the messages printed a literal "(unknown)" instead
            # of interpolating the actual file name.
            print(f"💾 正在保存数据到 {filename}...")
            df.to_excel(filename, index=False, sheet_name='EMS运费表')
            print(f"✅ 数据已保存到 {filename}")
        except Exception as e:
            print(f"❌ 保存Excel文件失败：{e}")

    def save_to_json(self, shipping_data: List[Dict], filename: str = "japanpost_ems_prices.json"):
        """Write the parsed rate rows to a JSON file; errors are logged, not raised."""
        try:
            # Bug fix: same "(unknown)" placeholder as save_to_excel.
            print(f"💾 正在保存数据到 {filename}...")
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(shipping_data, f, ensure_ascii=False, indent=2)
            print(f"✅ 数据已保存到 {filename}")
        except Exception as e:
            print(f"❌ 保存JSON文件失败：{e}")

    def print_summary(self, shipping_data: List[Dict]):
        """Print record count, weight range and the first three rows."""
        print("\n📋 数据摘要")
        print("=" * 60)
        print(f"总记录数：{len(shipping_data)}")

        if shipping_data:
            print(f"重量范围：{shipping_data[0]['weight_text']} - {shipping_data[-1]['weight_text']}")

            print("\n📦 示例数据（前3条）：")
            for i, item in enumerate(shipping_data[:3]):
                print(f"\n{i+1}. {item['weight_text']}")
                for zone, price in item['prices'].items():
                    print(f"   {zone}: {price:,} 日元")

    def run(self):
        """Execute the full pipeline: fetch, parse, export, summarize."""
        print("🚀 开始爬取日本邮政EMS运费数据...")
        print("=" * 60)

        # 1. Fetch the page; abort quietly on network failure.
        html_content = self.fetch_page()
        if not html_content:
            return

        # 2. Parse the rate table; abort if nothing was extracted.
        shipping_data = self.parse_table(html_content)
        if not shipping_data:
            return

        # 3. Tabulate.
        df = self.create_dataframe(shipping_data)

        # 4. Persist to Excel and JSON.
        self.save_to_excel(df)
        self.save_to_json(shipping_data)

        # 5. Summary.
        self.print_summary(shipping_data)

        # 6. Full dump to the console.
        print("\n📊 完整运费数据：")
        print("=" * 60)
        print(df.to_string(index=False))

        print("\n🎉 爬取完成！")

def main():
    """Script entry point: build a scraper and run the full pipeline."""
    JapanPostEMSScraper().run()


if __name__ == "__main__":
    main()
