import numpy as np
import os
import random
import json
from tqdm import tqdm
from collections import defaultdict
from transformers import AutoTokenizer
import matplotlib.pyplot as plt

class ParagraphProcessor:
    def __init__(self, file_path, paragraph_sep_token=198, tokenizer_name="gpt2"):
        """
        Initialize the paragraph processor.

        Args:
            file_path (str): path to the token file (raw uint16 token IDs; it
                is read headerless via memmap even if the extension is .npy).
            paragraph_sep_token (int): token ID of the paragraph separator
                (default 198 is GPT-2's newline token).
            tokenizer_name (str): HuggingFace tokenizer name used for decoding.
        """
        self.file_path = file_path
        self.paragraph_sep_token = paragraph_sep_token
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, local_files_only=True)

        # Basic file info: token count is derived from the byte size,
        # assuming a flat headerless array of uint16 token IDs.
        self.file_size = os.path.getsize(file_path)
        self.dtype = np.uint16
        self.dtype_size = np.dtype(self.dtype).itemsize
        self.total_tokens = self.file_size // self.dtype_size

        print(f"文件信息: {file_path}")
        print(f"文件大小: {self.file_size / (1024**3):.2f} GB")
        print(f"总token数: {self.total_tokens:,}")
        print(f"段落分隔符token: {paragraph_sep_token} -> '{self.tokenizer.decode([paragraph_sep_token])}'")

        # Memory-map the file so multi-GB corpora are never loaded into RAM.
        self.mmap = np.memmap(file_path, dtype=self.dtype, mode='r')

    def find_paragraphs(self, chunk_size_mb=200):
        """
        Scan the token stream and locate all paragraph boundaries.

        A boundary is a run of >= 2 consecutive separator tokens (i.e. a
        blank line). As in the original intent, the recorded boundary is the
        LAST separator of the run; the next paragraph starts right after it.

        Bug fixes vs. the previous version:
          * run-detection state was only initialized when a chunk contained a
            separator (NameError on separator-free chunks) and the detection
            loop was mis-indented;
          * a stray ``break`` stopped the scan after the first chunk;
          * runs straddling a chunk boundary were missed (state now persists
            across chunks);
          * the trailing paragraph after the last boundary was silently
            dropped.

        Args:
            chunk_size_mb (int): size of each processing chunk in MB.

        Returns:
            list: dicts with keys "id", "start", "end", "length"
                  (token offsets into the mapped file, "end" exclusive).
        """
        print("\n开始扫描段落位置...")

        # Chunking parameters.
        elements_per_chunk = int(chunk_size_mb * 1024**2 / self.dtype_size)
        num_chunks = (self.total_tokens + elements_per_chunk - 1) // elements_per_chunk

        paragraphs = []
        current_para_start = 0
        para_id = 0

        # Run-length state, carried ACROSS chunks.
        run_len = 0    # length of the current run of consecutive separators
        run_last = -2  # global index of the most recent separator (-2 = none)

        def _close_paragraph(boundary):
            # Record the paragraph ending at `boundary` (exclusive) and
            # advance the start pointer past the separator.
            nonlocal current_para_start, para_id
            if boundary > current_para_start:
                paragraphs.append({
                    "id": para_id,
                    "start": current_para_start,
                    "end": boundary,
                    "length": boundary - current_para_start
                })
                para_id += 1
            current_para_start = boundary + 1

        for chunk_idx in tqdm(range(num_chunks), desc="扫描段落", total=num_chunks):
            start = chunk_idx * elements_per_chunk
            end = min(start + elements_per_chunk, self.total_tokens)
            chunk = self.mmap[start:end]

            # Global positions of separator tokens within this chunk.
            for pos in np.where(chunk == self.paragraph_sep_token)[0] + start:
                pos = int(pos)  # plain int keeps results JSON-serializable
                if pos == run_last + 1:
                    run_len += 1  # the run continues
                else:
                    # Run broke: a run of >= 2 separators marks a boundary.
                    if run_len >= 2:
                        _close_paragraph(run_last)
                    run_len = 1
                run_last = pos

        # Flush the final separator run after the last chunk.
        if run_len >= 2:
            _close_paragraph(run_last)

        # Keep the trailing paragraph (previously commented out -> file tail
        # was silently lost).
        if current_para_start < self.total_tokens:
            paragraphs.append({
                "id": para_id,
                "start": current_para_start,
                "end": self.total_tokens,
                "length": self.total_tokens - current_para_start
            })

        print(f"找到段落总数: {len(paragraphs):,}")
        return paragraphs

    def analyze_paragraphs(self, paragraphs):
        """Compute, print and return paragraph statistics.

        Returns a dict with counts, min/avg/max lengths and a histogram of
        lengths bucketed by 50 tokens (everything >= 1000 in one bucket).
        """
        print("\n分析段落统计信息...")

        # Guard: avoid ZeroDivisionError / min()/max() on an empty list.
        if not paragraphs:
            print("段落数量: 0")
            return {
                "total_paragraphs": 0,
                "total_paragraph_tokens": 0,
                "avg_tokens_per_para": 0.0,
                "min_tokens_per_para": 0,
                "max_tokens_per_para": 0,
                "length_distribution": defaultdict(int)
            }

        # Basic statistics.
        lengths = [p["length"] for p in paragraphs]
        total_paragraph_tokens = sum(lengths)

        stats = {
            "total_paragraphs": len(paragraphs),
            "total_paragraph_tokens": total_paragraph_tokens,
            "avg_tokens_per_para": total_paragraph_tokens / len(paragraphs),
            "min_tokens_per_para": min(lengths),
            "max_tokens_per_para": max(lengths),
            "length_distribution": defaultdict(int)
        }

        # Histogram: bucket by 50 tokens, capped at the 1000 bucket.
        for length in lengths:
            bucket = min(1000, (length // 50) * 50)
            stats["length_distribution"][bucket] += 1

        print(f"段落数量: {stats['total_paragraphs']:,}")
        print(f"段落总token数: {stats['total_paragraph_tokens']:,}")
        print(f"平均段落长度: {stats['avg_tokens_per_para']:.1f} tokens")
        print(f"最短段落: {stats['min_tokens_per_para']} tokens")
        print(f"最长段落: {stats['max_tokens_per_para']} tokens")

        # Plot the length distribution.
        self.visualize_length_distribution(stats)

        return stats

    def visualize_length_distribution(self, stats):
        """Plot the paragraph-length histogram and save it as a PNG."""
        dist = stats["length_distribution"]
        buckets = sorted(dist.keys())
        counts = [dist[b] for b in buckets]

        plt.figure(figsize=(12, 6))
        plt.bar(buckets, counts, width=40)
        plt.title("段落长度分布")
        plt.xlabel("段落长度 (tokens)")
        plt.ylabel("段落数量")
        plt.grid(True, axis='y', alpha=0.3)

        # Reference lines at common length thresholds.
        plt.axvline(x=100, color='r', linestyle='--', alpha=0.5, label='100 tokens')
        plt.axvline(x=500, color='g', linestyle='--', alpha=0.5, label='500 tokens')
        plt.axvline(x=1000, color='b', linestyle='--', alpha=0.5, label='1000 tokens')

        plt.legend()
        plt.savefig("paragraph_length_distribution.png")
        plt.show()

    def sample_paragraphs(self, paragraphs, sample_fraction=0.1):
        """
        Randomly sample a fraction of the paragraphs.

        Args:
            paragraphs (list): paragraph info dicts.
            sample_fraction (float): fraction of paragraphs to keep.

        Returns:
            list: sampled paragraph dicts, in original document order.
        """
        print(f"\n随机采样 {sample_fraction*100}% 的段落...")

        # Sample without replacement; sort indices to preserve document order.
        sample_size = int(len(paragraphs) * sample_fraction)
        sampled_indices = random.sample(range(len(paragraphs)), sample_size)
        sampled_paragraphs = [paragraphs[i] for i in sorted(sampled_indices)]

        # Post-sampling statistics.
        sampled_tokens = sum(p["length"] for p in sampled_paragraphs)
        print(f"采样段落数: {len(sampled_paragraphs):,}")
        print(f"采样token数: {sampled_tokens:,} ({sampled_tokens/self.total_tokens:.2%} of total)")

        return sampled_paragraphs

    def build_new_document(self, sampled_paragraphs, output_path):
        """
        Concatenate the sampled paragraphs, separator-delimited, into a new
        raw token file.

        Args:
            sampled_paragraphs (list): sampled paragraph info dicts.
            output_path (str): output file path.
        """
        print(f"\n构建新文档: {output_path}")

        # One separator token is appended after every paragraph.
        total_tokens = sum(p["length"] for p in sampled_paragraphs) + len(sampled_paragraphs)

        # Guard: np.memmap cannot create a zero-length mapping.
        if total_tokens == 0:
            with open(output_path, "wb"):
                pass
            print(f"新文档创建完成! 大小: {os.path.getsize(output_path) / (1024**3):.2f} GB")
            return

        # Writable output mapping sized up front.
        output_mmap = np.memmap(
            output_path,
            dtype=self.dtype,
            mode='w+',
            shape=(total_tokens,)
        )

        current_position = 0

        for para in tqdm(sampled_paragraphs, desc="构建新文档"):
            start, end = para["start"], para["end"]
            length = para["length"]

            # Copy the paragraph tokens from the source mapping.
            output_mmap[current_position:current_position+length] = self.mmap[start:end]
            current_position += length

            # Append the separator (the buffer is sized to include one per
            # paragraph, including the last).
            if current_position < total_tokens:
                output_mmap[current_position] = self.paragraph_sep_token
                current_position += 1

        # Flush to disk and release the mapping.
        output_mmap.flush()
        del output_mmap

        print(f"新文档创建完成! 大小: {os.path.getsize(output_path) / (1024**3):.2f} GB")

    def decode_sample_paragraphs(self, sampled_paragraphs, num_samples=5):
        """Decode and print up to ``num_samples`` sampled paragraphs."""
        print("\n采样段落内容示例:")

        for i in tqdm(range(min(num_samples, len(sampled_paragraphs))), desc="解码示例"):
            para = sampled_paragraphs[i]
            # Widen uint16 -> int32 before handing IDs to the tokenizer.
            tokens = self.mmap[para["start"]:para["end"]].astype(np.int32)
            text = self.tokenizer.decode(tokens.tolist())

            print(f"\n段落 {i+1} (ID: {para['id']}, 长度: {para['length']} tokens):")
            print("-" * 80)
            # Truncate very long paragraphs for display.
            print(text[:2000] + "..." if len(text) > 2000 else text)
            print("-" * 80)

def main():
    """Run the pipeline: scan paragraphs, sample, rebuild, preview."""
    # Configuration.
    input_file = "./python/dolma/data/part-000-00000.npy"  # replace with the real file path
    output_file = "sample-part-000-00000.npy"
    # NOTE(fix): 187 is the newline token of the GPT-NeoX tokenizer used
    # below; the old comment wrongly called it GPT-2's newline (that is 198).
    paragraph_sep_token = 187
    sample_fraction = 0.01  # sample 1% of paragraphs (old comment wrongly said 1/10)

    # Set up the processor (memory-maps the input file).
    processor = ParagraphProcessor(
        file_path=input_file,
        paragraph_sep_token=paragraph_sep_token,
        tokenizer_name="EleutherAI/gpt-neox-20b"
    )

    # 1. Locate all paragraphs.
    paragraphs = processor.find_paragraphs(chunk_size_mb=200)

    # 2. Paragraph statistics (optional; disabled as in the original).
    # stats = processor.analyze_paragraphs(paragraphs)

    # 3. Randomly sample paragraphs.
    sampled_paragraphs = processor.sample_paragraphs(paragraphs, sample_fraction)

    # 4. Build the sampled document.
    processor.build_new_document(sampled_paragraphs, output_file)

    # 5. Decode a few paragraphs for inspection.
    processor.decode_sample_paragraphs(sampled_paragraphs)

# Entry point: run the pipeline only when executed as a script.
if __name__ == "__main__":
    main()