import json
import pandas as pd
import numpy as np

# Module-level line counter. process_sample() writes the current input-file
# line number here; NOTE(review): nothing in this file reads it back — the
# helpers receive line_number as a parameter instead. Confirm before removal.
current_line_number = 0

def parse_sections(content: str) -> dict:
    """
    Split the user content into logical sections.

    Returns a dict with keys 'before_line', 'line_table',
    'between_line_and_node', 'node_table', 'between_node_and_power' and
    'power_and_fault', each mapped to a substring of *content* with trailing
    newlines stripped. Sections are located by searching for the fixed
    marker strings; absent sections come back as empty strings.
    """
    line_start_marker = "line impedance table is as follows: From | To | Impedance | Status"
    node_start_marker = "node load table is as follows: Bus|Pd|Qd"
    power_start_marker = "power source capacity table is as follows: Bus | Capacity"
    fault_marker = "The fault occurs at"

    # Locate each marker inside the raw string (-1 when absent).
    line_start_idx = content.find(line_start_marker)
    node_start_idx = content.find(node_start_marker)
    power_start_idx = content.find(power_start_marker)
    fault_start_idx = content.find(fault_marker)

    # The final section starts at whichever of the power table or the fault
    # sentence appears first; -1 when neither is present.
    found = [i for i in (power_start_idx, fault_start_idx) if i != -1]
    power_or_fault_start_idx = min(found) if found else -1

    sections = {
        'before_line': '',
        'line_table': '',
        'between_line_and_node': '',
        'node_table': '',
        'between_node_and_power': '',
        'power_and_fault': ''
    }

    if line_start_idx == -1:
        # No line-table marker: the whole content is 'before_line'.
        sections['before_line'] = content
        return sections

    sections['before_line'] = content[:line_start_idx].rstrip('\n')

    # End of the line table: start of the node table, else start of
    # power/fault, else end of content.
    line_end_pos = len(content)
    if node_start_idx != -1:
        line_end_pos = node_start_idx
    elif power_or_fault_start_idx != -1:
        line_end_pos = power_or_fault_start_idx

    # The line table keeps its marker line plus its data rows.
    sections['line_table'] = content[line_start_idx:line_end_pos].rstrip('\n')

    if node_start_idx != -1:
        # Node table runs until power/fault (when that lies after it) or EOF.
        node_end_pos = len(content)
        if power_or_fault_start_idx != -1 and power_or_fault_start_idx > node_start_idx:
            node_end_pos = power_or_fault_start_idx

        sections['node_table'] = content[node_start_idx:node_end_pos].rstrip('\n')
        sections['between_line_and_node'] = content[line_end_pos:node_start_idx].rstrip('\n')
        # Everything after the node table, including power table and fault.
        sections['between_node_and_power'] = content[node_end_pos:].rstrip('\n')
    else:
        # No node table: everything after the line table up to power/fault
        # (or EOF) is 'between_line_and_node'; the rest is the final section.
        intermediate_end = power_or_fault_start_idx if power_or_fault_start_idx != -1 else len(content)
        sections['between_line_and_node'] = content[line_end_pos:intermediate_end].rstrip('\n')
        sections['between_node_and_power'] = content[intermediate_end:].rstrip('\n')

    # 'power_and_fault' is an alias of 'between_node_and_power'.
    sections['power_and_fault'] = sections['between_node_and_power']

    return sections

def parse_line_table(content: str) -> pd.DataFrame:
    """
    Parse the line impedance table from the user content.

    Returns a DataFrame with columns ['From', 'To', 'Impedance', 'Status'];
    'From'/'To' are strings, 'Status' is an int where it parses as one.
    Raises ValueError when no table rows can be found.
    """
    header = "line impedance table is as follows: From | To | Impedance | Status"
    rows = []
    in_table = False

    for raw in content.strip().split('\n'):
        if raw.strip() == header:
            in_table = True  # data rows follow the header line
            continue
        if not in_table:
            continue
        # A blank line or the start of the next section ends the table.
        if (raw.strip() == ""
                or raw.startswith("node load table")
                or raw.startswith("power source capacity table")
                or "The fault occurs at" in raw):
            break
        fields = [f.strip() for f in raw.split('|')]
        if len(fields) != 4:
            # Rows that do not have exactly 4 cells are skipped; the table
            # is assumed well-formed.
            continue
        try:
            fields[3] = int(fields[3])  # normalize Status to int when possible
        except ValueError:
            pass
        rows.append(fields)

    if not rows:
        raise ValueError("Could not find or parse the line impedance table.")

    df = pd.DataFrame(rows, columns=['From', 'To', 'Impedance', 'Status'])
    # Bus identifiers are handled as strings throughout the pipeline.
    df['From'] = df['From'].astype(str)
    df['To'] = df['To'].astype(str)
    return df

def parse_node_table_data(content: str) -> pd.DataFrame:
    """
    Parse the data rows of the node load table into a DataFrame.

    Returns columns ['Bus', 'Pd', 'Qd']; 'Bus' is a string and the load
    columns become numeric when every value parses. Raises ValueError when
    no data rows are found.
    """
    header = "node load table is as follows: Bus|Pd|Qd"
    end_markers = ["power source capacity table is as follows:", "The fault occurs at"]
    rows = []
    in_table = False

    for raw in content.strip().split('\n'):
        if raw.strip() == header:
            in_table = True  # data rows start after the header line
            continue
        if not in_table:
            continue
        # Any end marker or a blank line terminates the data section.
        if any(m in raw for m in end_markers) or raw.strip() == "":
            break
        fields = [f.strip() for f in raw.split('|')]
        if len(fields) == 3:
            rows.append(fields)

    if not rows:
        raise ValueError("Could not find or parse the node load table data.")

    df = pd.DataFrame(rows, columns=['Bus', 'Pd', 'Qd'])
    df['Bus'] = df['Bus'].astype(str)
    # Convert load columns to numbers where possible; keep strings otherwise.
    for col in ('Pd', 'Qd'):
        try:
            df[col] = pd.to_numeric(df[col])
        except ValueError:
            pass
    return df

def parse_node_table_header_and_footer(content: str) -> tuple:
    """
    Extract the node-load-table header line and the trailing ("footer")
    content that follows the table's data rows.

    Returns:
        (header_line, footer_string) — the footer starts at the first
        non-empty, non-data line after the header that contains one of the
        end markers (power table / fault sentence); it is "" when no such
        line exists.

    Raises:
        ValueError: if the node load table header is not present.
    """
    lines = content.strip().split('\n')
    table_start_marker = "node load table is as follows: Bus|Pd|Qd"
    table_end_markers = ["power source capacity table is as follows:", "The fault occurs at"]

    header_line = ""
    header_idx = -1
    footer_start_idx = len(lines)  # default: table runs to the end, no footer

    for i, line in enumerate(lines):
        if line.strip() == table_start_marker:
            header_line = line
            header_idx = i
        # After the header, look for the first line that is non-empty, does
        # not look like a 3-column data row, and carries an end marker:
        # that line starts the footer.
        if header_idx != -1 and i > header_idx:
            parts = [p.strip() for p in line.split('|')]
            if line.strip() != "" and len(parts) != 3:
                if any(marker in line for marker in table_end_markers):
                    footer_start_idx = i
                    break

    footer_lines = lines[footer_start_idx:] if footer_start_idx < len(lines) else []

    if not header_line:
        raise ValueError("Could not find the node load table header.")

    return header_line, "\n".join(footer_lines)


def reconstruct_node_table_str(header_line: str, df_nodes_shuffled: pd.DataFrame) -> str:
    """
    Rebuild the node-table string (header plus "Bus|Pd|Qd" rows) from the
    shuffled DataFrame.
    """
    def _cell(value):
        # Missing loads (NaN) are rendered as empty cells.
        return value if pd.notna(value) else ""

    body = [
        f"{rec['Bus']}|{_cell(rec['Pd'])}|{_cell(rec['Qd'])}"
        for _, rec in df_nodes_shuffled.iterrows()
    ]
    return "\n".join([header_line] + body)

def parse_capacity_table_and_fault(content: str) -> str:
    """
    Extract the power source capacity table and the fault sentence as one
    string.

    When the capacity-table marker is present, everything from the marker
    line to the end of the content is kept. A fault sentence seen before
    (or without) the table is kept on its own. Returns "" when neither is
    found.

    Note: the original implementation carried an unreachable
    `elif table_started:` branch (it sat in the `elif` of
    `if table_started:`); that dead code has been removed without changing
    behavior.
    """
    lines = content.strip().split('\n')
    marker = "power source capacity table is as follows: Bus | Capacity"
    collected = []
    in_table = False

    for line in lines:
        if line.strip() == marker:
            in_table = True
        if in_table:
            # From the marker onward, keep every line (capacity rows and the
            # fault sentence alike).
            collected.append(line)
        elif "The fault occurs at" in line and not collected:
            # Fault sentence encountered before the capacity table (or the
            # table is absent entirely): keep only the first such line here.
            collected.append(line)

    # Safety net: if the fault sentence was never collected (e.g. it sits
    # before the table but something was already collected), append the
    # first occurrence from the raw lines.
    if not any("The fault occurs at" in l for l in collected):
        for line in lines:
            if "The fault occurs at" in line:
                collected.append(line)
                break

    return "\n".join(collected) if collected else ""


def reconstruct_user_content(original_content: str, df_lines_shuffled: pd.DataFrame, df_nodes_shuffled: pd.DataFrame) -> str:
    """
    Rebuild the user content with the shuffled line and node tables spliced
    back into their original positions.
    """
    # Split the original content into its logical sections.
    sections = parse_sections(original_content)

    # Rebuild the line impedance table from the shuffled rows.
    line_header = "line impedance table is as follows: From | To | Impedance | Status"
    line_rows = []
    for _, rec in df_lines_shuffled.iterrows():
        status = rec['Status']
        # Numeric statuses are rendered as plain ints; anything else as-is.
        if isinstance(status, (int, float)) and not pd.isna(status):
            status_text = str(int(status))
        else:
            status_text = str(status)
        line_rows.append(f"{rec['From']}|{rec['To']}|{rec['Impedance']}|{status_text}")
    line_table = line_header + ("\n" if line_rows else "") + "\n".join(line_rows)

    # Rebuild the node load table from the shuffled rows.
    node_header = "node load table is as follows: Bus|Pd|Qd"

    def _load_text(value):
        # Numbers use a compact %.6g rendering; NaN becomes an empty cell.
        if isinstance(value, (int, float)) and not pd.isna(value):
            return f"{value:.6g}"
        return str(value) if not pd.isna(value) else ""

    node_rows = [
        f"{rec['Bus']}|{_load_text(rec['Pd'])}|{_load_text(rec['Qd'])}"
        for _, rec in df_nodes_shuffled.iterrows()
    ]
    node_table = node_header + ("\n" if node_rows else "") + "\n".join(node_rows)

    # Stitch the sections back together in their original order.
    ordered = [
        sections['before_line'],
        line_table,
        sections['between_line_and_node'],
        node_table,
        sections['between_node_and_power'],  # includes power table and fault
    ]
    # Drop empty parts and trailing newlines to avoid blank-line doubling.
    kept = [part.rstrip('\n') for part in ordered if part.strip()]

    # Keep a single trailing newline whenever there is any content at all.
    return "\n".join(kept) + ("\n" if kept else "")


def update_assistant_actions(original_assistant_content: str, df_lines_shuffled: pd.DataFrame, line_number: int) -> str:
    """
    Remap the assistant's "(from,to,status)" actions onto the shuffled line
    table and re-order them by row position in that table.

    *line_number* is only used in warning messages. The original content is
    returned unchanged when it is not JSON, not successful, or has no
    actions.
    """
    try:
        payload = json.loads(original_assistant_content)
    except json.JSONDecodeError as e:
        print(f"Warning (Line {line_number}): Could not parse assistant content as JSON. Error: {e}. Returning original.")
        return original_assistant_content

    # Nothing to rewrite when the sample failed or carries no actions.
    if not payload.get("success", False) or "action" not in payload:
        return original_assistant_content

    raw_actions = payload["action"]
    remapped = []

    for i, raw in enumerate(raw_actions):
        try:
            raw = raw.strip()
            # Reject empty or bracket-only action strings up front.
            if not raw or raw in ["()", "(", ")", ""]:
                raise ValueError("Action string is empty or only brackets")

            pieces = raw.strip("()").split(',')
            if len(pieces) != 3:
                raise ValueError(f"Action format is incorrect, expected 3 parts, got {len(pieces)}")

            from_bus = pieces[0].strip()
            to_bus = pieces[1].strip()
            status_text = pieces[2].strip()
            try:
                status = int(status_text)
            except ValueError:
                raise ValueError(f"Status '{status_text}' is not a valid integer")
        except (ValueError, IndexError) as e:
            print(f"Warning (Line {line_number}, Action Index {i}): Could not parse action string '{raw}' from list {raw_actions}. Error: {e}. Skipping.")
            continue  # skip this action and move on

        # Look the branch up in both orientations within the shuffled table.
        direct = df_lines_shuffled[
            (df_lines_shuffled['From'] == from_bus) & (df_lines_shuffled['To'] == to_bus)
        ]
        reverse = df_lines_shuffled[
            (df_lines_shuffled['From'] == to_bus) & (df_lines_shuffled['To'] == from_bus)
        ]

        if not direct.empty:
            remapped.append((from_bus, to_bus, status, direct.index[0]))
        elif not reverse.empty:
            # Report the endpoints in the table's stored orientation.
            remapped.append((to_bus, from_bus, status, reverse.index[0]))
        else:
            print(f"Warning (Line {line_number}, Action Index {i}): Action branch ({from_bus}, {to_bus}) not found in shuffled line table (list: {raw_actions}). Skipping.")
            continue

    # Emit actions sorted by their row index in the shuffled table.
    remapped.sort(key=lambda item: item[3])
    payload["action"] = [f"({f},{t},{s})" for f, t, s, _ in remapped]
    return json.dumps(payload, ensure_ascii=False)


def process_sample(sample: dict, seed: int, line_number: int) -> dict:
    """
    Shuffle one sample's line and node tables and rewrite its user and
    assistant messages in place; *line_number* is used for error reporting.
    """
    global current_line_number
    # Expose the input line number to other functions via the module global.
    current_line_number = line_number

    user_content = sample['messages'][1]['content']
    original_assistant_content = sample['messages'][2]['content']

    # Shuffle the line impedance table rows.
    df_lines_shuffled = (
        parse_line_table(user_content)
        .sample(frac=1, random_state=seed)
        .reset_index(drop=True)
    )

    # Randomly swap From/To on branches whose Status is not -1.
    np.random.seed(seed)
    flips = np.random.randint(0, 2, size=len(df_lines_shuffled))
    to_swap = (df_lines_shuffled['Status'] != -1) & (flips == 1)
    df_lines_shuffled.loc[to_swap, ['From', 'To']] = (
        df_lines_shuffled.loc[to_swap, ['To', 'From']].values
    )

    # Shuffle the node load table rows with the same seed.
    df_nodes_shuffled = (
        parse_node_table_data(user_content)
        .sample(frac=1, random_state=seed)
        .reset_index(drop=True)
    )

    # Rewrite the user message around the shuffled tables.
    sample['messages'][1]['content'] = reconstruct_user_content(
        user_content, df_lines_shuffled, df_nodes_shuffled
    )

    # Remap the assistant's actions onto the shuffled line table.
    sample['messages'][2]['content'] = update_assistant_actions(
        original_assistant_content, df_lines_shuffled, line_number
    )

    return sample


def main():
    """Read the input JSONL, shuffle each sample's tables, write the output."""
    input_file = "samples\\402算例_时序负荷数据\\train_data.jsonl"
    output_file = "samples\\402算例_时序负荷数据\\train_new2.jsonl"

    processed_data = []
    seed_counter = 20  # per-sample seed; incremented after each success
    line_number = 0  # 1-based position in the input file, for error reports

    with open(input_file, 'r', encoding='utf-8') as f_in:
        for raw_line in f_in:
            line_number += 1
            stripped = raw_line.strip()
            if not stripped:
                continue  # skip blank lines
            try:
                sample = json.loads(stripped)
                processed_data.append(
                    process_sample(sample, seed=seed_counter, line_number=line_number)
                )
                seed_counter += 1
            except Exception as e:
                # Report and keep going: one bad sample must not stop the run.
                print(f"Error processing line {line_number}: {e}")
                import traceback
                traceback.print_exc()
                continue

    with open(output_file, 'w', encoding='utf-8') as f_out:
        for sample in processed_data:
            f_out.write(json.dumps(sample, ensure_ascii=False) + '\n')
        f_out.flush()

    print(f"Processing complete. Output written to {output_file}")

# --- Script entry point: run main() when executed directly ---
if __name__ == "__main__":
    main()
