# main.py
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from tqdm import tqdm

# NOTE(review): globally silences *all* warnings (scipy/pandas/matplotlib);
# consider narrowing to specific categories so real problems stay visible.
warnings.filterwarnings('ignore')

# ---------- PERT utilities ----------
class PertDistribution:
    """Helpers for the (modified) PERT distribution with lambda = 4.

    Parameters follow the PERT convention: ``a`` = minimum, ``b`` = most
    likely value (mode), ``c`` = maximum, requiring a < b < c (a == c or
    b outside [a, c] makes the shape parameters degenerate).  The
    distribution is a Beta(alpha, beta) rescaled to [a, c] with

        alpha = 1 + 4(b - a)/(c - a),  beta = 1 + 4(c - b)/(c - a).
    """

    @staticmethod
    def _shape_params(a, b, c):
        """Return (alpha, beta) of the underlying Beta distribution."""
        alpha = 1 + 4 * (b - a) / (c - a)
        beta = 1 + 4 * (c - b) / (c - a)
        return alpha, beta

    @staticmethod
    def rvs(a, b, c, size=None, random_state=None):
        """Draw PERT variates on [a, c]; deterministic for a fixed random_state."""
        alpha, beta = PertDistribution._shape_params(a, b, c)
        return a + (c - a) * stats.beta.rvs(alpha, beta, size=size, random_state=random_state)

    @staticmethod
    def pdf(x, a, b, c):
        """Density at x via change of variable from the unit-interval Beta."""
        alpha, beta = PertDistribution._shape_params(a, b, c)
        return stats.beta.pdf((x - a) / (c - a), alpha, beta) / (c - a)

    @staticmethod
    def theoretical_skewness(a, b, c):
        """Skewness; scale/location invariant, so the Beta value applies directly."""
        alpha, beta = PertDistribution._shape_params(a, b, c)
        return float(stats.beta(alpha, beta).stats(moments='s'))

    @staticmethod
    def theoretical_kurtosis(a, b, c):
        """Excess (Fisher) kurtosis — comparable to stats.kurtosis(..., fisher=True)."""
        alpha, beta = PertDistribution._shape_params(a, b, c)
        return float(stats.beta(alpha, beta).stats(moments='k'))

    @staticmethod
    def theoretical_mean(a, b, c):
        """Classic PERT mean (a + 4b + c) / 6."""
        return (a + 4 * b + c) / 6

    @staticmethod
    def theoretical_var(a, b, c):
        """Exact variance via the underlying Beta: scale^2 * ab / ((a+b)^2 (a+b+1)).

        (A commented-out textbook approximation previously lived here; the
        exact Beta form below supersedes it.)
        """
        alpha, beta = PertDistribution._shape_params(a, b, c)
        scale = c - a
        return scale**2 * alpha * beta / ((alpha + beta)**2 * (alpha + beta + 1))

# ---------- Sampling pipeline ----------
class SamplingProcessor:
    """Per-floor PERT sampling pipeline.

    Workflow (see run()): pre-sample each floor until its moments converge,
    unify the sample size across floors, re-sample/KS-validate at the
    unified size, then persist samples, per-floor diagnostic figures and a
    summary CSV plus comparison plot.

    Expects a config CSV with one row per floor containing at least:
    floor, a, b, c, initial_n, max_total_n, increment_n, random_seed,
    skewness_threshold, kurtosis_threshold, samples_output_path,
    samples_validation_output_path (and, for the summary figure,
    samples_verification_output_path).
    """

    def __init__(self, config_path):
        # One config row per floor; see class docstring for required columns.
        self.config = pd.read_csv(config_path)
        self.results = []  # one summary dict per floor, appended by run()

    # ---------------- Convergence / KS testing ----------------
    def check_convergence(self, samples):
        """Return the sample moments used by the convergence checks."""
        return dict(mean=np.mean(samples),
                    var=np.var(samples),
                    median=np.median(samples),
                    skewness=stats.skew(samples),
                    kurtosis=stats.kurtosis(samples, fisher=True))

    def ks_test(self, samples, params):
        """Two-sample KS test against a fixed reference draw (seed 42).

        Returns (passed, p_value), where passed means p >= 0.05.
        """
        theory = PertDistribution.rvs(params['a'], params['b'], params['c'],
                                      size=10000, random_state=42)
        stat, p = stats.ks_2samp(samples, theory)
        return p >= 0.05, p

    # ---------------- Pre-sampling ----------------
    def pre_sampling(self, row):
        """Grow the sample size until skewness/kurtosis match theory.

        Starting at initial_n, accept size n when (1) sample skewness and
        kurtosis are within the row's thresholds and (2) for n beyond the
        initial size, mean/variance are stable (<= 0.5% / <= 3% relative
        change) versus a same-seed draw of size n - increment_n.  Each
        failed attempt bumps n by increment_n and the seed by 1.

        Returns (n, samples, seed) where ``seed`` is the seed that actually
        produced ``samples``.  Raises ValueError if initial_n > max_total_n
        (previously this fell through to an unbound-variable error).
        """
        a, b, c = row['a'], row['b'], row['c']
        n, max_n, inc = row['initial_n'], row['max_total_n'], row['increment_n']
        seed = row['random_seed']
        thr_sk, thr_ku = row['skewness_threshold'], row['kurtosis_threshold']

        if n > max_n:
            raise ValueError(f"initial_n ({n}) exceeds max_total_n ({max_n}) "
                             f"for floor {row['floor']}")

        while n <= max_n:
            s = PertDistribution.rvs(a, b, c, size=n, random_state=seed)
            m = self.check_convergence(s)
            sk_d = abs(m['skewness'] - PertDistribution.theoretical_skewness(a, b, c))
            ku_d = abs(m['kurtosis'] - PertDistribution.theoretical_kurtosis(a, b, c))
            if sk_d <= thr_sk and ku_d <= thr_ku:
                if n > row['initial_n']:
                    prev = PertDistribution.rvs(a, b, c, size=n - inc, random_state=seed)
                    pm = self.check_convergence(prev)
                    # Guard the denominators: a previous mean/variance of ~0
                    # would otherwise raise ZeroDivisionError.
                    mean_den = max(abs(pm['mean']), 1e-12)
                    var_den = max(abs(pm['var']), 1e-12)
                    if abs(m['mean'] - pm['mean']) / mean_den <= 0.005 \
                       and abs(m['var'] - pm['var']) / var_den <= 0.03:
                        return n, s, seed
                else:
                    return n, s, seed
            n += inc
            seed += 1
        # Not converged within max_n: return the last draw.  seed was already
        # bumped past the one used for s, so report seed - 1 (the seed that
        # actually produced s).
        return n - inc, s, seed - 1

    # ---------------- Formal sampling ----------------
    def formal_sampling(self, row, n_unified, seed):
        """Re-sample at the unified size until moments and KS both pass.

        Tries up to 100 consecutive seeds.  Returns
        (samples, ks_passed, ks_pvalue, converged, seed_used); on exhaustion
        the last draw is returned with converged=False and the last observed
        KS p-value (None if the moment check never passed).
        """
        a, b, c = row['a'], row['b'], row['c']
        max_try = 100
        thr_sk, thr_ku = row['skewness_threshold'], row['kurtosis_threshold']
        p = None  # last KS p-value; reported even when all tries fail
        for _ in range(max_try):
            s = PertDistribution.rvs(a, b, c, size=n_unified, random_state=seed)
            m = self.check_convergence(s)
            sk_d = abs(m['skewness'] - PertDistribution.theoretical_skewness(a, b, c))
            ku_d = abs(m['kurtosis'] - PertDistribution.theoretical_kurtosis(a, b, c))
            if sk_d > thr_sk or ku_d > thr_ku:
                seed += 1
                continue
            ok, p = self.ks_test(s, dict(a=a, b=b, c=c))
            if ok:
                return s, ok, p, True, seed
            seed += 1
        # seed was bumped after the final draw; seed - 1 produced s.
        return s, False, p, False, seed - 1

    # ---------------- Plotting ----------------
    def generate_plots(self, samples, floor_name, output_path):
        """Save a 2x2 validation figure: histogram vs PDF, Q-Q, CDF, box plot."""
        row = self.config[self.config['floor'] == floor_name].iloc[0]
        a, b, c = row['a'], row['b'], row['c']

        sample_skew = stats.skew(samples)
        sample_kurt = stats.kurtosis(samples, fisher=True)
        theory_skew = PertDistribution.theoretical_skewness(a, b, c)
        theory_kurt = PertDistribution.theoretical_kurtosis(a, b, c)

        # Frozen scipy distribution equivalent to PERT(a, b, c).
        alpha = 1 + 4 * (b - a) / (c - a)
        beta = 1 + 4 * (c - b) / (c - a)
        rv = stats.beta(alpha, beta, loc=a, scale=c - a)

        fig, axes = plt.subplots(2, 2, figsize=(12, 10))
        fig.suptitle(f'PERT Distribution Validation - Floor {floor_name}', fontsize=14)

        # (a) Histogram against the theoretical density
        ax = axes[0, 0]
        ax.hist(samples, bins=30, density=True, alpha=0.7, label='Sample Histogram')
        x = np.linspace(a, c, 500)
        ax.plot(x, rv.pdf(x), 'r-', label='Theoretical PDF')
        ax.set_xlabel('Value')
        ax.set_ylabel('Density')
        ax.legend()
        ax.text(0.98, 0.97,
                f'Skewness: {sample_skew:.3f}\nKurtosis: {sample_kurt:.3f}\n'
                f'(Theory: {theory_skew:.3f}, {theory_kurt:.3f})',
                transform=ax.transAxes, ha='right', va='top',
                bbox=dict(boxstyle='round,pad=0.3', facecolor='white', alpha=0.8))
        ax.set_title('(a) Histogram vs Theoretical PDF', y=-0.2)

        # (b) Q-Q plot against the frozen PERT distribution
        ax = axes[0, 1]
        stats.probplot(samples, dist=rv, plot=ax)
        ax.set_title('(b) Q-Q Plot', y=-0.2)  # title below the axes
        ax.set_xlabel('Theoretical Quantiles')
        ax.set_ylabel('Sample Quantiles')

        # (c) Empirical CDF vs theoretical CDF
        ax = axes[1, 0]
        sorted_s = np.sort(samples)
        ecdf = np.arange(1, len(sorted_s) + 1) / len(sorted_s)
        ax.step(sorted_s, ecdf, where='post', label='Sample CDF')
        ax.plot(x, rv.cdf(x), 'r-', label='Theoretical CDF')
        ax.set_xlabel('Value')
        ax.set_ylabel('CDF')
        ax.legend()
        ax.set_title('(c) Empirical vs Theoretical CDF', y=-0.2)

        # (d) Box plot for outlier/spread inspection
        ax = axes[1, 1]
        ax.boxplot(samples)
        ax.set_ylabel('Value')
        ax.set_title('(d) Box Plot Analysis', y=-0.2)

        plt.tight_layout(rect=[0, 0, 1, 0.96])
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        plt.savefig(output_path, dpi=300)
        plt.close()

    # ---------------- Summary bias figure ----------------
    def plot_summary_bias(self, df):
        """Plot per-floor sample vs theoretical mean/var/skew/kurtosis."""
        # NOTE(review): reads 'samples_verification_output_path' here while
        # run() uses 'samples_validation_output_path' — confirm both columns
        # exist in the config CSV.
        out_dir = os.path.dirname(self.config.iloc[0]['samples_verification_output_path'])
        floors = df['floor']
        metrics = ['mean', 'var', 'skewness', 'kurtosis']
        sample_cols = ['sample_' + m for m in metrics]
        theory_cols = ['theoretical_' + m for m in metrics]
        labels = ['Mean', 'Variance', 'Skewness', 'Kurtosis']

        # Widen the figure when there are many floors so labels stay legible.
        fig, axes = plt.subplots(2, 2, figsize=(max(10, len(floors) * 0.7), 7))
        axes = axes.ravel()

        for ax, sc, tc, lab in zip(axes, sample_cols, theory_cols, labels):
            ax.plot(floors, df[sc], marker='o', linestyle='-', label='Sample')
            ax.plot(floors, df[tc], marker='s', linestyle='--', label='Theory')
            ax.set_title(lab)
            ax.set_xticks(floors)
            ax.set_xticklabels(floors, rotation=45, ha='right')
            ax.legend()
            ax.grid(alpha=0.3)

        fig.suptitle('Floor-wise Statistics: Sample vs. Theoretical', y=0.98)
        plt.tight_layout(rect=[0, 0, 1, 0.96])
        os.makedirs(out_dir, exist_ok=True)
        plt.savefig(os.path.join(out_dir, 'summary_theory_vs_sample.png'), dpi=300)
        plt.close()

    # ---------------- Main pipeline ----------------
    def run(self):
        """Execute the full pipeline: pre-sample, unify n, formal sample, report."""
        # Stage 1: pre-sample each floor to find its converged size.
        pre_results = {}
        for _, row in tqdm(self.config.iterrows(), total=len(self.config), desc='Pre-sampling'):
            n_i, samples, seed = self.pre_sampling(row)
            pre_results[row['floor']] = {'n_i': n_i, 'samples': samples, 'final_seed': seed}

        # Stage 2: unify on the largest per-floor size.
        n_unified = max(v['n_i'] for v in pre_results.values())
        tqdm.write(f'Unified sample size: {n_unified}')

        # Stage 3: formal sampling, persistence, plots and summary rows.
        for _, row in tqdm(self.config.iterrows(), total=len(self.config), desc='Formal'):
            fl = row['floor']
            if pre_results[fl]['n_i'] == n_unified:
                # Pre-sample already has the unified size: keep it, KS-test only.
                samp = pre_results[fl]['samples']
                ks_ok, p = self.ks_test(samp, dict(a=row['a'], b=row['b'], c=row['c']))
                conv = True
                seed = pre_results[fl]['final_seed']
            else:
                samp, ks_ok, p, conv, seed = self.formal_sampling(
                    row, n_unified, pre_results[fl]['final_seed'])

            os.makedirs(os.path.dirname(row['samples_output_path']), exist_ok=True)
            pd.DataFrame({'value': samp}).to_csv(row['samples_output_path'], index=False)
            self.generate_plots(samp, fl, row['samples_validation_output_path'])

            m = self.check_convergence(samp)
            a, b, c = row['a'], row['b'], row['c']
            theory_mean = PertDistribution.theoretical_mean(a, b, c)
            theory_var = PertDistribution.theoretical_var(a, b, c)
            theory_skew = PertDistribution.theoretical_skewness(a, b, c)
            theory_kurt = PertDistribution.theoretical_kurtosis(a, b, c)

            self.results.append({
                'floor': fl,
                'final_n': n_unified,
                'sample_mean': m['mean'],
                'theoretical_mean': theory_mean,
                'abs_bias_mean': abs(m['mean'] - theory_mean),
                'sample_var': m['var'],
                'theoretical_var': theory_var,
                'abs_bias_var': abs(m['var'] - theory_var),
                'sample_skewness': m['skewness'],
                'theoretical_skewness': theory_skew,
                'abs_bias_skewness': abs(m['skewness'] - theory_skew),
                'sample_kurtosis': m['kurtosis'],
                'theoretical_kurtosis': theory_kurt,
                'abs_bias_kurtosis': abs(m['kurtosis'] - theory_kurt),
                'ks_pvalue': p,
                'used_seed': seed,
                'converged': conv
            })

        summary_df = pd.DataFrame(self.results)
        out_dir = os.path.dirname(self.config.iloc[0]['samples_validation_output_path'])
        os.makedirs(out_dir, exist_ok=True)
        summary_df.to_csv(os.path.join(out_dir, 'samples_validation.csv'), index=False)
        self.plot_summary_bias(summary_df)
        print('All tasks completed.')


if __name__ == '__main__':
    # Entry point: the parameter CSV is expected next to this script.
    processor = SamplingProcessor('sampling_parameters2.csv')
    processor.run()