import csv
import logging
import os

import dedupe
import dedupe.variables
import matplotlib.pyplot as plt
import numpy as np
from dedupe import RecordLink
from kneed import KneeLocator
from langchain_openai import ChatOpenAI

# Configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# File paths: "left" holds prescription medicines to look up, "right" holds
# the medicines already on hand; the output lists left titles with no match.
# settings_file / training_file persist the trained dedupe model between runs.
left_file = "prescpt_medicine.csv"
right_file = "got_medicine.csv"
output_file = "missing_medicine.csv"
settings_file = "medicine_matching_settings"
training_file = "medicine_matching_training.json"

llm = None
try:
    # SECURITY: the API key used to be hard-coded here. Prefer the
    # DASHSCOPE_API_KEY environment variable; the literal fallback keeps the
    # previous behavior but the key should be rotated and removed from source.
    llm = ChatOpenAI(
        api_key=os.environ.get("DASHSCOPE_API_KEY",
                               "sk-514de7e7f4614e7088aea369174c4ced"),
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        model="qwen3-235b-a22b",
        extra_body={"enable_thinking": False},  # disable the model's "thinking" mode
        temperature=0.0  # deterministic labeling decisions
    )
    # Fixed: was an f-string with no placeholder and logged the wrong name.
    logger.info("LLM initialized with model: qwen3-235b-a22b")
except Exception as e:
    logger.error(f"Failed to initialize LLM: {e}")

# Data preprocessing
def preProcess(column):
    """Normalize one raw CSV cell for matching.

    Trims whitespace, strips surrounding double then single quotes, and
    lowercases the text. Returns None for missing values or values that
    become empty after cleaning.
    """
    if column is None:
        return None
    cleaned = str(column).strip().strip('"').strip("'").lower()
    if cleaned:
        return cleaned
    return None

def readData(filename, is_left):
    """Read a medicine CSV into {"<prefix><row_index>": {"title", "alias"}}.

    Keys are prefixed "left_"/"right_" so the two datasets never collide.
    Only the right-hand file may carry an "alias" column; the alias key is
    always present in each record (None when absent) so dedupe sees a
    uniform schema. Returns {} on any read failure.
    """
    data = {}
    prefix = "left_" if is_left else "right_"
    try:
        with open(filename, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for i, row in enumerate(reader):
                title = preProcess(row.get("title"))
                alias = preProcess(row.get("alias")) if not is_left and "alias" in row else None
                record = {
                    "title": title,
                    "alias": alias  # always keep the alias field, even when None
                }
                data[f"{prefix}{i}"] = record
    except FileNotFoundError:
        # BUG FIX: these messages previously logged the literal "(unknown)"
        # instead of interpolating the offending filename.
        logger.error(f"Error: File not found at {filename}")
        return {}
    except Exception as e:
        logger.error(f"Error reading data from {filename}: {e}")
        return {}
    return data

# Utility functions
def llm_label_pair(record_pair, llm_instance):
    """Ask the LLM whether two medicine records describe the same entity.

    Returns 'match' or 'distinct' on a clear verdict, or None when the LLM
    is unavailable, answers ambiguously, or the call fails.
    """
    if not llm_instance:
        logger.warning("LLM instance not available for labeling.")
        return None

    rec_a, rec_b = record_pair
    title_a = rec_a.get('title', "N/A")
    alias_a = rec_a.get('alias', "N/A")
    title_b = rec_b.get('title', "N/A")
    alias_b = rec_b.get('alias', "N/A")

    prompt = f"""
You are an expert pharmaceutical data analyst tasked with determining if two medicine records refer to the same entity.
Consider the medicine's official title and any known aliases.

Record 1:
Title: {title_a}
Alias: {alias_a}

Record 2:
Title: {title_b}
Alias: {alias_b}

Based on this information, do these two records represent the same medicine?
Respond with only one word: 'match', 'distinct', or 'uncertain'.
"""
    try:
        response = llm_instance.invoke(prompt)
        raw = response.content if hasattr(response, 'content') else str(response)
        verdict = raw.strip().lower()
        if verdict in ("match", "distinct"):
            return verdict
        logger.warning(f"LLM returned ambiguous or unhandled label: '{verdict}'. Treating as uncertain for this pair.")
        return None
    except Exception as e:
        logger.error(f"Error during LLM API call or response parsing: {e}")
        return None

# Dedupe section + threshold tuning + multi-stage matching
def train_or_load_model(data_1, data_2, llm_instance, use_llm_labeling=True, num_llm_labels_target=100, llm_label_batch_size=5): # num_llm_labels_target: maximum number of pairs to label
    """Return a trained dedupe RecordLink model, loading it when possible.

    Order of precedence:
      1. Load a read-only StaticRecordLink from `settings_file` if it exists.
      2. Otherwise build a fresh RecordLink, seed it from `training_file`
         (if present), label uncertain pairs via the LLM (or the console),
         train, and persist settings + training data.

    Returns the linker even when training fails, so the caller must check
    its state before using it.
    """
    if os.path.exists(settings_file):
        logger.info(f"Loading trained model settings from {settings_file}")
        with open(settings_file, "rb") as sf:
            try:
                # StaticRecordLink is pre-trained; no labeling/training needed.
                linker = dedupe.StaticRecordLink(sf)
                return linker
            except Exception as e:
                # Fall through and retrain from scratch on a corrupt file.
                logger.error(f"Error loading settings file {settings_file}: {e}. Will attempt to retrain.")
    logger.info("Initializing new model for training.")
    # Field definitions: alias is optional on the right dataset, hence has_missing.
    fields = [
        dedupe.variables.String("title"),
        dedupe.variables.Text("alias", has_missing=True)
    ]
    linker = RecordLink(fields)
    if os.path.exists(training_file):
        logger.info(f"Loading existing training data from {training_file}")
        try:
            with open(training_file, 'r') as tf:
                linker.read_training(tf)
        except Exception as e:
            logger.warning(f"Could not read training file {training_file} or it's invalid: {e}. Starting fresh labeling.")
    logger.info("Preparing training data and finding candidate pairs for labeling...")
    linker.prepare_training(data_1, data_2, sample_size=min(len(data_1), len(data_2), 15000))
    if use_llm_labeling and llm_instance:
        # Active-learning loop: repeatedly pull dedupe's most uncertain pairs,
        # let the LLM label a small batch, and feed the labels back in.
        logger.info(f"Starting LLM-assisted active learning. Target: {num_llm_labels_target} labels.")
        labeled_count = 0
        while labeled_count < num_llm_labels_target:
            uncertain_pairs_to_label = linker.uncertain_pairs()
            if not uncertain_pairs_to_label:
                logger.info("No more uncertain pairs for LLM labeling. Dedupe has enough information or no more candidates.")
                break
            pairs_for_llm_batch = uncertain_pairs_to_label[:llm_label_batch_size]
            labels_for_batch = {'match': [], 'distinct': []}
            processed_in_batch = 0
            for record_pair in pairs_for_llm_batch:
                if labeled_count >= num_llm_labels_target:
                    break
                llm_decision = llm_label_pair(record_pair, llm_instance)
                if llm_decision == "match":
                    labels_for_batch['match'].append(record_pair)
                    labeled_count += 1
                elif llm_decision == "distinct":
                    labels_for_batch['distinct'].append(record_pair)
                    labeled_count += 1
                else:
                    # None means the LLM was uncertain/errored: the pair is
                    # simply dropped from this round's labels.
                    logger.debug(f"LLM uncertain for pair: {record_pair[0]['title']} <-> {record_pair[1]['title']}. Skipping.")
                processed_in_batch +=1
                if (labeled_count % 10 == 0 and labeled_count > 0) or processed_in_batch == len(pairs_for_llm_batch) :
                    logger.info(f"LLM labeled: {labeled_count}/{num_llm_labels_target} pairs.")
            if labels_for_batch['match'] or labels_for_batch['distinct']:
                linker.mark_pairs(labels_for_batch)
            # NOTE(review): this guard only fires when the target was already
            # reached before processing any pair; an all-uncertain batch keeps
            # looping — presumably uncertain_pairs() eventually drains. Verify.
            if processed_in_batch == 0 and not (labels_for_batch['match'] or labels_for_batch['distinct']):
                logger.info("LLM was uncertain about all pairs in the current batch.")
                break
        logger.info(f"LLM-assisted labeling finished. Total pairs labeled by LLM in this session: {labeled_count} (considering only match/distinct).")
    else:
        # No LLM available: fall back to interactive console labeling unless
        # labels were already loaded from the training file.
        if not (linker.training_pairs.get('match') or linker.training_pairs.get('distinct')):
            logger.info("LLM labeling not used or no LLM available. Starting manual console labeling.")
            print("Starting manual labeling for dedupe. Please follow the prompts.")
            dedupe.console_label(linker)
        else:
            logger.info("LLM labeling not used, but existing labels found. Proceeding to train.")
    # Without at least some match/distinct examples the model cannot train.
    if not linker.training_pairs or (not linker.training_pairs.get('match') and not linker.training_pairs.get('distinct')):
        logger.error("No training examples (match/distinct) available. Model cannot be trained.")
        logger.error("Try running LLM labeling, providing a training file, or using console_label.")
        return linker
    logger.info("Training the dedupe model...")
    try:
        linker.train(recall=0.90)
    except ValueError as e:
        logger.error(f"Error during model training: {e}. This might happen if there are too few labels or labels are all of one type.")
        return linker
    # Persist the trained model and the accumulated labels for future runs.
    logger.info("Saving model settings and training data.")
    with open(settings_file, "wb") as sf:
        linker.write_settings(sf)
    with open(training_file, "w") as tf:
        linker.write_training(tf)
        logger.info(f"Training data saved to {training_file}")
    return linker

def threshold_tuning(linker, data_1_full, data_2_full):
    """Estimate a good match-score threshold via the knee/elbow method.

    Links the two datasets at a very low threshold to collect the full score
    distribution, counts how many pairs survive each candidate threshold, and
    uses KneeLocator to find the elbow of that curve. Falls back to 0.8 (or a
    score percentile) whenever data is too sparse or the analysis fails, and
    always clamps the result to [0.4, 0.95]. Saves a diagnostic plot.
    """
    logger.info("Starting threshold tuning...")
    if not data_1_full or not data_2_full:
        logger.warning("Input data for threshold tuning is empty. Using default threshold 0.8.")
        return 0.8
    try:
        logger.info("Clustering data with a low threshold to get score distribution for tuning...")
        # threshold=0.01 keeps essentially every candidate pair so we can
        # inspect the whole score distribution.
        all_clustered_pairs = linker.join(data_1_full, data_2_full, threshold=0.01)
    except Exception as e:
        logger.error(f"Error during linker.join in threshold_tuning: {e}. Using default threshold 0.8.")
        return 0.8
    if not all_clustered_pairs:
        logger.warning("No pairs found by linker.join for threshold tuning. Using default threshold 0.8.")
        return 0.8
    scores = np.array([score for _, score in all_clustered_pairs if score > 0])
    if len(scores) < 10:
        logger.warning(f"Too few scored pairs ({len(scores)}) for reliable kneed analysis. Using default threshold 0.8 or max score.")
        return max(0.8, np.max(scores) * 0.85) if len(scores) > 0 else 0.8
    # Candidate thresholds span the observed score range; counts[i] is how
    # many pairs would survive thresholds_to_test[i].
    thresholds_to_test = np.linspace(min(scores) if len(scores)>0 else 0.3, max(scores) if len(scores)>0 else 0.85, num=60)
    counts = []
    for t in thresholds_to_test:
        counts.append(np.sum(scores >= t))
    if not any(c > 0 for c in counts):
        logger.warning("No matches found across any tested thresholds for tuning. Using default 0.8")
        best_threshold = 0.8
    else:
        try:
            # Polynomial degree for kneed's interpolation, capped at 7 and at
            # one less than the number of sample points.
            poly_degree = min(7, len(thresholds_to_test) - 1) if len(thresholds_to_test) > 7 else max(1, len(thresholds_to_test) -1)
            if poly_degree < 1 :
                 logger.warning("Not enough data points for polynomial interpolation in kneedle. Defaulting threshold.")
                 best_threshold = 0.8
            else:
                # The count-vs-threshold curve is concave and decreasing; the
                # knee marks where raising the threshold stops paying off.
                kneedle = KneeLocator(
                    thresholds_to_test, counts,
                    curve="concave", direction="decreasing", S=2.0,
                    interp_method="polynomial", polynomial_degree=poly_degree
                )
                best_threshold = kneedle.knee
                if best_threshold is None:
                    logger.warning("Kneedle could not find a knee point. Trying online=True or defaulting.")
                    kneedle_online = KneeLocator(thresholds_to_test, counts, curve="concave", direction="decreasing", S=2.0, online=True, interp_method="polynomial", polynomial_degree=poly_degree)
                    best_threshold = kneedle_online.knee
                if best_threshold is None:
                     # Last resort: a high percentile of the scores, clamped.
                     logger.warning("Kneedle still could not find a knee. Defaulting threshold to 0.8 or a high percentile of scores.")
                     if len(scores) > 20:
                         best_threshold = np.percentile(scores, 75)
                     else:
                         best_threshold = 0.8
                     best_threshold = max(0.5, min(0.95, best_threshold))
        except Exception as e:
            logger.error(f"Error during KneeLocator analysis: {e}. Defaulting threshold to 0.8")
            best_threshold = 0.8
    # Final safety clamp applied to every path above.
    best_threshold = float(max(0.4, min(0.95, best_threshold)))
    # Diagnostic plot: match count vs. threshold, with the chosen knee marked.
    plt.figure(figsize=(12, 7))
    plt.plot(thresholds_to_test, counts, label="Match Count vs. Threshold", color='blue', marker='.', linestyle='-')
    try:
        idx_at_knee = (np.abs(thresholds_to_test - best_threshold)).argmin()
        count_at_knee = counts[idx_at_knee]
        plt.scatter([best_threshold], [count_at_knee], color='red', s=100, zorder=5, label=f"Chosen Threshold: {best_threshold:.3f}")
        plt.axvline(best_threshold, color='red', linestyle='--', lw=2)
    except Exception as e:
        logger.warning(f"Could not plot knee point details: {e}")
    plt.xlabel("Score Threshold")
    plt.ylabel("Number of Pairs Above Threshold")
    plt.title("Threshold Tuning: Match Count Elbow Method")
    plt.legend()
    plt.grid(True, which='both', linestyle='--', linewidth=0.5)
    plt.tight_layout()
    try:
        plt.savefig("threshold_tuning_plot.png")
        logger.info(f"Optimal threshold estimated at {best_threshold:.3f}. Tuning plot saved to threshold_tuning_plot.png")
    except Exception as e:
        logger.error(f"Failed to save threshold tuning plot: {e}")
    return best_threshold

class HierarchicalMatcher:
    """Two-stage record linker.

    Stage 1 pairs records whose normalized titles are byte-identical
    (score 1.0). Stage 2 runs the trained dedupe model on whatever is left.
    Results are deduplicated, keeping the highest score per (left, right) pair.
    """

    def __init__(self, data_1_orig, data_2_orig, threshold):
        # Shallow copies: the matcher filters these dicts without touching
        # the caller's data.
        self.data_1 = data_1_orig.copy()
        self.data_2 = data_2_orig.copy()
        self.threshold = threshold
        self.matches = []

    def _exact_match(self):
        """Return ([((left_id, right_id), 1.0), ...], matched_right_ids)."""
        logger.info("Performing exact matching step...")
        # Index left records by title so each right title is one lookup.
        title_index = {}
        for left_id, left_rec in self.data_1.items():
            left_title = left_rec.get("title")
            if left_title:
                title_index.setdefault(left_title, []).append(left_id)
        found = []
        matched_right_ids = set()
        for right_id, right_rec in self.data_2.items():
            right_title = right_rec.get("title")
            if right_title and right_title in title_index:
                for left_id in title_index[right_title]:
                    found.append(((left_id, right_id), 1.0))
                    matched_right_ids.add(right_id)
        logger.info(f"Found {len(found)} exact matches.")
        return found, matched_right_ids

    def _dedupe_match(self, linker, left_pool, right_pool):
        """Run the dedupe linker on the residual record pools."""
        if not left_pool or not right_pool:
            logger.info("One or both datasets for dedupe matching are empty. Skipping dedupe step.")
            return []
        logger.info(f"Performing dedupe model matching on {len(left_pool)} (left) vs {len(right_pool)} (right) records with threshold {self.threshold:.3f}.")
        linked = linker.join(left_pool, right_pool, threshold=self.threshold)
        logger.info(f"Found {len(linked)} matches from dedupe model.")
        return linked

    def run(self, linker):
        """Execute both stages; return unique (left_id, right_id, score) triples."""
        exact_pairs, right_done = self._exact_match()
        for (left_id, right_id), score in exact_pairs:
            self.matches.append((left_id, right_id, score))
        left_done = {ids[0] for ids, _ in exact_pairs}
        # Only records untouched by the exact stage go through the model.
        residual_left = {k: v for k, v in self.data_1.items() if k not in left_done}
        residual_right = {k: v for k, v in self.data_2.items() if k not in right_done}
        logger.info(f"Remaining for dedupe: {len(residual_left)} (left), {len(residual_right)} (right) records.")
        if residual_left and residual_right:
            for (left_id, right_id), score in self._dedupe_match(linker, residual_left, residual_right):
                self.matches.append((left_id, right_id, score))
        else:
            logger.info("Skipping dedupe matching stage as one or both datasets are empty after exact matching.")
        # Collapse duplicates, keeping the best score for each pairing.
        best = {}
        for left_id, right_id, score in self.matches:
            key = tuple(sorted((left_id, right_id)))
            if key not in best or score > best[key][2]:
                best[key] = (left_id, right_id, score)
        self.matches = list(best.values())
        logger.info(f"Total unique matches after all stages: {len(self.matches)}")
        return self.matches


# Main logic
if __name__ == "__main__":
    # The LLM is mandatory here because this pipeline relies on LLM labeling.
    if not llm:
        logger.critical("LLM client failed to initialize. LLM-based labeling cannot proceed. Exiting.")
        exit(1)
    logger.info("--- Starting Medicine Matching Process ---")
    # Stage 0: load both datasets into prefixed-id dicts.
    logger.info(f"Reading left data from: {left_file}")
    data_1 = readData(left_file, is_left=True)
    logger.info(f"Left data loaded: {len(data_1)} records.")
    logger.info(f"Reading right data from: {right_file}")
    data_2 = readData(right_file, is_left=False)
    logger.info(f"Right data loaded: {len(data_2)} records.")
    if not data_1 or not data_2:
        logger.error("One or both input data files are empty or failed to load. Cannot proceed.")
        exit(1)
    # Stage 1: obtain a trained linker (loaded from disk or trained now).
    linker = train_or_load_model(data_1, data_2, llm,
                                 use_llm_labeling=True,
                                 num_llm_labels_target=100,
                                 llm_label_batch_size=10)
    # Sanity check: a freshly-trained (non-static) linker must have labels.
    # NOTE(review): 'match_blocks' is used as a proxy for "trained"; verify
    # against the installed dedupe version's API.
    if not hasattr(linker, 'match_blocks') and not isinstance(linker, dedupe.StaticRecordLink):
         if not (linker.training_pairs and (linker.training_pairs.get('match') or linker.training_pairs.get('distinct'))):
            logger.error("Model training failed or no labels provided. Cannot proceed with matching.")
            exit(1)
    logger.info("Dedupe model is ready.")
    # Stage 2: pick a score threshold from the data, then run both match stages.
    best_threshold = threshold_tuning(linker, data_1, data_2)
    logger.info(f"Initializing Hierarchical Matcher with threshold: {best_threshold:.3f}")
    hierarchical_matcher = HierarchicalMatcher(data_1, data_2, threshold=best_threshold)
    all_matches = hierarchical_matcher.run(linker)
    matched_left_ids = {l_id for l_id, r_id, score in all_matches}
    logger.info(f"Total matched pairs found: {len(all_matches)}. Unique left records matched: {len(matched_left_ids)}")
    # Stage 3: report every left-side title that found no match on the right.
    logger.info(f"Generating report of unmatched medicine titles from '{left_file}'...")
    missing_titles_list = []
    for rid, record_content in data_1.items():
        if rid not in matched_left_ids:
            title = record_content.get("title")
            if title:
                missing_titles_list.append(title)
    unique_missing_titles = sorted(list(set(missing_titles_list)))
    try:
        with open(output_file, "w", newline='', encoding="utf-8") as f:
            writer = csv.writer(f)
            writer.writerow(["title"])
            for title_to_write in unique_missing_titles:
                writer.writerow([title_to_write])
        logger.info(f"Found {len(unique_missing_titles)} unique unmatched medicine titles from '{left_file}'. Report saved to '{output_file}'")
    except Exception as e:
        logger.error(f"Error writing output file {output_file}: {e}")
    logger.info("--- Medicine Matching Process Completed ---")