import joblib

from BaseDatasetLoader import BaseDatasetLoader

import os
import csv
import sys
import re
from surprise import Dataset
from surprise import Reader
from collections import defaultdict
import numpy as np

# Number of rating rows consumed per call to extract_ratings(); also the
# implicit window size used by its epoch bookkeeping.
rows = 1000
# rows = 2500


class BookCrossingLoader(BaseDatasetLoader):
    """Loader for the Book-Crossing dataset.

    The item CSV is consumed in epoch-sized windows of ``max_rows`` rows
    (window ``epoch`` covers data rows ``[epoch*max_rows, (epoch+1)*max_rows)``).
    Two persistent mappings, cached to disk via joblib, translate between the
    loader's sequential integer book IDs and raw book titles.

    Item-file column layout relied upon below (0-based): 6 = title,
    8 = publication date (e.g. "2002.10"), 13 = summary, 15 = category list.
    """

    def __init__(self, item_path: str, epoch: int = 0, max_rows: int = 1000):
        """
        :param item_path: path to the item (books) CSV file, ISO-8859-1 encoded.
        :param epoch: zero-based window index into the item file.
        :param max_rows: number of item rows processed per epoch.
        """
        self.item_path = item_path
        self.rating_path = 'ratings.csv'
        self.cache_path = [
            'bookID_to_title.dat',
            'title_to_bookID.dat'
        ]

        self.epoch = epoch
        self.max_rows = max_rows

        # Warm-start both mappings from a previous run's cache, if present,
        # so book IDs stay stable across epochs.
        self.bookID_to_title = {}
        self.title_to_bookID = {}
        if os.path.exists(self.cache_path[0]):
            self.bookID_to_title = joblib.load(self.cache_path[0])
        if os.path.exists(self.cache_path[1]):
            self.title_to_bookID = joblib.load(self.cache_path[1])

        self._load_item_mappings()

        # Persist bookID_to_title / title_to_bookID to disk.
        self.save_cache()

    def save_cache(self):
        """Persist both ID/title mappings to their joblib cache files."""
        try:
            joblib.dump(self.bookID_to_title, self.cache_path[0])
            joblib.dump(self.title_to_bookID, self.cache_path[1])
            # BUGFIX: the message used to say "Similarity cache" (copy-paste);
            # these files hold the title mappings.
            print(f"Mapping cache saved to {self.cache_path}")
        except Exception as e:
            # Best-effort cache: report and continue rather than crash.
            print(f"Failed to save cache: {e}")

    def _iter_epoch_rows(self, path):
        """Yield ``(absolute_row_index, row)`` for the data rows that fall in
        this epoch's window ``[epoch*max_rows, (epoch+1)*max_rows)``, skipping
        the header line.

        BUGFIX: the original loops initialized their counter to ``start_row``,
        which made the ``count >= start_row`` guard always true — every epoch
        re-read the first ``max_rows`` rows of the file.  Counting from 0 and
        skipping up to the window start restores the intended slicing.
        """
        start_row = self.epoch * self.max_rows
        end_row = start_row + self.max_rows
        with open(path, newline='', encoding='ISO-8859-1') as csvfile:
            reader = csv.reader(csvfile)
            next(reader)  # skip header
            for count, row in enumerate(reader):
                if count >= end_row:
                    break
                if count >= start_row:
                    yield count, row

    def _load_item_mappings(self):
        """Scan this epoch's slice of the item file and extend the
        bookID<->title mappings, de-duplicating titles case- and
        whitespace-insensitively across epochs.
        """
        # BUGFIX: this used to reload bookID_to_title from the wrong cache
        # file instead of restoring the persisted seen-title set, so
        # cross-epoch de-duplication never actually worked.
        seen_titles = set()
        if os.path.exists('seen_titles.dat'):
            seen_titles = joblib.load('seen_titles.dat')

        book_id_counter = len(self.bookID_to_title)  # next free book ID

        for _, row in self._iter_epoch_rows(self.item_path):
            raw_title = row[6].strip()
            # Normalization (lowercase, spaces removed) is used purely for
            # duplicate detection; the raw title is what gets stored.
            normalized_title = raw_title.lower().replace(" ", "")
            if normalized_title not in seen_titles:
                self.bookID_to_title[book_id_counter] = raw_title
                self.title_to_bookID[raw_title] = book_id_counter
                seen_titles.add(normalized_title)
                book_id_counter += 1  # advance only when a title is added

        # Persist the de-duplication state and the refreshed mappings.
        joblib.dump(seen_titles, 'seen_titles.dat')
        self.save_cache()

    def load_dataset(self) -> Dataset:
        """Extract this epoch's ratings into ``self.rating_path`` and load
        them as a Surprise :class:`Dataset`.
        """
        return extract_ratings(self.item_path, self.rating_path, self.epoch)

    def get_user_ratings(self, user_id: int) -> list:
        """Not implemented for this dataset."""
        pass

    def get_item_features(self) -> defaultdict:
        """Not implemented for this dataset."""
        pass

    def get_popularity_ranks(self) -> defaultdict:
        """Rank books by how often they were rated.

        :return: defaultdict mapping bookID -> popularity rank
                 (1 = most frequently rated).
        """
        ratings = defaultdict(int)
        rankings = defaultdict(int)
        with open(self.rating_path, newline='') as csvfile:
            rating_reader = csv.reader(csvfile)
            next(rating_reader)
            for row in rating_reader:
                ratings[int(row[1])] += 1
        # Sort by rating count descending and replace counts with ranks.
        ranked = sorted(ratings.items(), key=lambda kv: kv[1], reverse=True)
        for rank, (book_id, _count) in enumerate(ranked, start=1):
            rankings[book_id] = rank
        return rankings

    def getBookName(self, bookID: int) -> str:
        """Return the title for *bookID*, or "" when the ID is unknown."""
        return self.bookID_to_title.get(bookID, "")

    def getYears(self):
        """Map book IDs to publication years parsed from the item file.

        :return: defaultdict {bookID: year}.
        """
        # Accept "2002", "2002.1", "2002.10", "2002-5", ... — the month part
        # is optional.  BUGFIX: the original pattern required exactly one
        # trailing digit and therefore missed values like "2002.10".
        p = re.compile(r"(\d{4})(?:[.-]\d{1,2})?\s*$")
        years = defaultdict(int)
        for _, row in self._iter_epoch_rows(self.item_path):
            # Titles dropped as duplicates have no bookID; skip them rather
            # than raising KeyError (also strip to match how IDs were keyed).
            book_id = self.title_to_bookID.get(row[6].strip())
            if book_id is None:
                continue
            m = p.search(row[8])  # publication date, e.g. "2002.10"
            if m:
                years[book_id] = int(m.group(1))
        return years

    def getCategory(self):
        """One-hot encode book categories for this epoch's slice.

        :return: defaultdict {bookID: one-hot list over all categories seen
                 in this slice}; the label order is kept in
                 ``self.category_labels``.
        """
        all_categories = set()
        raw_data = []

        # Pass 1: collect every distinct category label in the slice.
        for _, row in self._iter_epoch_rows(self.item_path):
            # Raw field looks like "['Fiction', 'Science']".
            category_str = row[15].replace('[', '').replace(']', '').replace("'", '')
            categories = [c.strip() for c in category_str.split(',') if c.strip()]
            all_categories.update(categories)
            # BUGFIX: key by the loader's bookID (as getYears/getSummery and
            # this method's own docstring promise) instead of the row index.
            book_id = self.title_to_bookID.get(row[6].strip())
            if book_id is not None:
                raw_data.append((book_id, categories))

        # Stable category ordering -> index mapping.
        category_list = sorted(all_categories)
        category_index = {cat: idx for idx, cat in enumerate(category_list)}

        # Pass 2: build the one-hot vectors.
        genres = defaultdict(list)
        for book_id, categories in raw_data:
            vector = [0] * len(category_list)
            for cat in categories:
                if cat in category_index:
                    vector[category_index[cat]] = 1
            genres[book_id] = vector

        # Keep the label order for downstream consumers.
        self.category_labels = category_list

        return genres

    def getSummery(self):
        """Map book IDs to their summary text ('NULL' when missing).

        :return: defaultdict {bookID: summary string}.
        """
        summaries = defaultdict(str)
        for _, row in self._iter_epoch_rows(self.item_path):
            book_id = self.title_to_bookID.get(row[6].strip())
            if book_id is None:
                continue  # duplicate title dropped during mapping
            summary = row[13]
            summaries[book_id] = summary if summary else 'NULL'
        return summaries


def extract_ratings(input_path, output_path, epoch):
    """Copy up to ``rows`` rating rows from *input_path* into a
    userId,itemId,rating CSV at *output_path*, then load it with Surprise.

    Input column layout (0-based): 0 = itemId, 1 = userId, 5 = rating.

    :param input_path: source CSV, ISO-8859-1 encoded.
    :param output_path: destination CSV; a header row is written first.
    :param epoch: window index; offsets the internal row counter.
    :return: a Surprise :class:`Dataset` built from *output_path*.
    """
    with open(input_path, 'r', newline='', encoding='ISO-8859-1') as infile, \
            open(output_path, 'w', newline='', encoding='ISO-8859-1') as outfile:
        reader = csv.reader(infile)
        writer = csv.writer(outfile)

        writer.writerow(['userId', 'itemId', 'rating'])

        next(reader)  # skip the original header

        # BUGFIX: the start offset was hard-coded as epoch * 1000 while the
        # stop condition used the module-level `rows`; with rows != 1000 the
        # window size was wrong.  Both now use `rows`.
        row_count = epoch * rows
        end_count = epoch * rows + rows
        # NOTE(review): rows before the window are never skipped — every
        # epoch re-reads the first `rows` data rows of the file and the epoch
        # only shifts the counter.  Confirm whether a real seek to the window
        # start (as _load_item_mappings intends) is wanted here.
        for row in reader:
            if row_count >= end_count:  # window filled — stop
                break
            rating = row[5]
            # A zero rating is bumped to 1 to avoid downstream
            # division-by-zero.
            if rating == '0':
                rating = '1'
            writer.writerow([
                row[1],          # userId
                row[0],          # itemId
                float(rating),   # rating
            ])
            row_count += 1

    # Load the freshly written file with Surprise.
    reader = Reader(line_format='user item rating', sep=',', skip_lines=1)
    return Dataset.load_from_file(output_path, reader=reader)


def extract_users(input_path, output_path):
    """Placeholder for extracting user records from *input_path* into
    *output_path*; not implemented yet.
    """
    return None
