import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import warnings

# Silence all warnings globally (e.g. scipy/numpy runtime warnings emitted
# during repeated sampling). NOTE(review): this also hides genuine problems —
# consider narrowing to specific categories.
warnings.filterwarnings('ignore')


class PertDistribution:
    """PERT distribution (minimum a, mode b, maximum c).

    Implemented as a Beta distribution rescaled from [0, 1] to [a, c] with the
    standard PERT shape parameters:

        alpha = 1 + 4 * (b - a) / (c - a)
        beta  = 1 + 4 * (c - b) / (c - a)

    Requires a < c and a <= b <= c (not validated here; a == c would divide
    by zero).
    """

    @staticmethod
    def _shape_params(a, b, c):
        """Return the (alpha, beta) shape parameters of the underlying Beta.

        Centralizes the formula that was previously duplicated in every
        public method.
        """
        span = c - a
        return 1 + 4 * (b - a) / span, 1 + 4 * (c - b) / span

    @staticmethod
    def rvs(a, b, c, size=None, random_state=None):
        """Draw random samples from PERT(a, b, c).

        Parameters
        ----------
        a, b, c : float
            Minimum, mode and maximum of the distribution.
        size : int or tuple, optional
            Output shape (scipy convention).
        random_state : int or np.random.Generator, optional
            Seed for reproducible draws.
        """
        alpha, beta = PertDistribution._shape_params(a, b, c)
        return a + (c - a) * stats.beta.rvs(alpha, beta, size=size, random_state=random_state)

    @staticmethod
    def pdf(x, a, b, c):
        """Evaluate the PERT probability density at x (scalar or array).

        Change of variables: the Beta pdf on [0, 1] is shifted/scaled to
        [a, c], hence the division by (c - a).
        """
        alpha, beta = PertDistribution._shape_params(a, b, c)
        return stats.beta.pdf((x - a) / (c - a), alpha, beta) / (c - a)

    @staticmethod
    def theoretical_skewness(a, b, c):
        """Exact skewness of PERT(a, b, c).

        Skewness is invariant under the linear rescaling, so the underlying
        Beta's skewness is returned directly (via scipy's exact computation).
        Returns a plain float (the original returned a 0-d numpy array).
        """
        alpha, beta = PertDistribution._shape_params(a, b, c)
        return float(stats.beta(alpha, beta).stats(moments='s'))

    @staticmethod
    def theoretical_kurtosis(a, b, c):
        """Exact excess (Fisher) kurtosis of PERT(a, b, c).

        scipy's moments='k' is excess kurtosis (0 for a normal distribution),
        matching stats.kurtosis(..., fisher=True) used elsewhere in this file.
        Returns a plain float.
        """
        alpha, beta = PertDistribution._shape_params(a, b, c)
        return float(stats.beta(alpha, beta).stats(moments='k'))

class SamplingProcessor:
    """Drives PERT pre-sampling, formal sampling, verification plots and a
    summary CSV for each 'floor' row in a configuration CSV."""

    def __init__(self, config_path):
        # Config CSV with one row per floor. Columns used by this class
        # (inferred from usage — verify against the actual file): 'floor',
        # 'a', 'b', 'c', 'random_seed', 'initial_n', 'max_total_n',
        # 'increment_n', 'skewness_threshold', 'kurtosis_threshold',
        # 'samples_output_path', 'samples_verification_output_path'.
        self.config = pd.read_csv(config_path)
        self.results = []  # per-floor summary dicts, filled by run()

    def check_convergence(self, samples, thresholds):
        """Compute the summary statistics used by the convergence checks.

        `thresholds` is accepted for interface compatibility but is not read
        here; callers compare the returned metrics against their own limits.

        Returns a dict with keys 'mean', 'var', 'median', 'skewness' and
        'kurtosis' (Fisher/excess kurtosis: 0 for a normal distribution,
        unlike the Pearson convention where normal kurtosis is 3).
        """
        data = np.asarray(samples)
        return {
            'mean': np.mean(data),
            'var': np.var(data),
            'median': np.median(data),
            'skewness': stats.skew(data),
            'kurtosis': stats.kurtosis(data, fisher=True),
        }

    def ks_test(self, samples, dist_params, dist_type='pert'):
        """Two-sample KS test of `samples` against a large reference draw.

        A fixed-seed reference of 10,000 PERT samples stands in for the
        theoretical distribution. Returns a tuple (passed, p_value) where
        `passed` means p >= 0.05, i.e. the hypothesis of identical
        distributions is not rejected. Returns None (implicitly, as in the
        original contract) for any unknown `dist_type`.
        """
        if dist_type != 'pert':
            return None
        reference = PertDistribution.rvs(
            dist_params['a'], dist_params['b'], dist_params['c'],
            size=10000, random_state=42
        )
        statistic, p_value = stats.ks_2samp(samples, reference)
        return p_value >= 0.05, p_value

    def pre_sampling(self, row):
        """Pre-sampling stage: grow the sample size until convergence.

        Repeatedly draws PERT samples, bumping the seed whenever the sample
        skewness/kurtosis deviate too far from the exact theoretical values,
        and declares convergence once mean and variance stabilise between
        consecutive sample sizes (relative change <= 0.5% / 3%).

        Parameters
        ----------
        row : pandas.Series (one config row) with keys 'floor', 'a', 'b',
            'c', 'random_seed', 'initial_n', 'max_total_n', 'increment_n',
            'skewness_threshold', 'kurtosis_threshold'.

        Returns
        -------
        (final_n, samples, final_seed)
            NOTE(review): on a non-converged exit `final_n` keeps the initial
            n while `samples` holds the last (larger) draw — preserved as-is
            from the original logic; confirm this asymmetry is intended.
        """
        floor = row['floor']
        initial_seed = row['random_seed']
        max_attempts = 100  # cap on total seed bumps to avoid endless search

        n = row['initial_n']
        max_n = row['max_total_n']
        increment = row['increment_n']

        converged = False
        final_n = n
        current_seed = initial_seed
        samples = None  # BUGFIX: was unbound (NameError) if the loop never ran

        skewness_threshold = row['skewness_threshold']
        kurtosis_threshold = row['kurtosis_threshold']

        # Loop-invariant: the theoretical moments do not depend on n or seed.
        theoretical_skew = PertDistribution.theoretical_skewness(row['a'], row['b'], row['c'])
        theoretical_kurt = PertDistribution.theoretical_kurtosis(row['a'], row['b'], row['c'])

        while n <= max_n and not converged:
            # BUGFIX: check the attempt cap at the top of the loop. Previously
            # it was only checked after `n += increment`, so the seed-bump
            # `continue` branches below skipped it and a floor whose moments
            # never matched the thresholds looped forever.
            if current_seed - initial_seed >= max_attempts:
                print(f"Warning: Reached maximum attempts for floor {floor}. Stopping pre-sampling.")
                break

            samples = PertDistribution.rvs(
                row['a'], row['b'], row['c'],
                size=n, random_state=current_seed
            )

            current_metrics = self.check_convergence(samples, row)

            # Require sample skewness/kurtosis to be close to the exact values.
            skew_diff = abs(current_metrics['skewness'] - theoretical_skew)
            kurt_diff = abs(current_metrics['kurtosis'] - theoretical_kurt)

            if skew_diff > skewness_threshold or kurt_diff > kurtosis_threshold:
                current_seed += 1  # retry the same n with the next seed
                continue

            if n > row['initial_n']:
                # Stability check: compare against a draw of the previous size
                # (same seed) on mean and variance.
                prev_samples = PertDistribution.rvs(
                    row['a'], row['b'], row['c'],
                    size=n - increment, random_state=current_seed
                )
                prev_metrics = self.check_convergence(prev_samples, row)

                mean_change = abs(current_metrics['mean'] - prev_metrics['mean']) / abs(prev_metrics['mean'])
                var_change = abs(current_metrics['var'] - prev_metrics['var']) / abs(prev_metrics['var'])

                if mean_change <= 0.005 and var_change <= 0.03:
                    converged = True
                    final_n = n
                    break

            n += increment
            current_seed += 1

        if samples is None:
            # Degenerate config (initial_n > max_n): still return a usable draw
            # instead of crashing with an unbound local.
            samples = PertDistribution.rvs(
                row['a'], row['b'], row['c'],
                size=final_n, random_state=current_seed
            )

        return final_n, samples, current_seed

    def formal_sampling(self, row, unified_n, initial_seed):
        """Formal sampling stage at the unified sample size.

        Searches over seeds (up to 100 attempts) until a draw passes the
        skewness/kurtosis closeness checks, the KS goodness-of-fit test, and
        — for unified_n >= 400 — a split-half stability check on mean and
        variance.

        Returns
        -------
        (samples, ks_pass, ks_pvalue, converged, used_seed)
        """
        floor = row['floor']
        max_attempts = 100  # cap on seed retries
        current_seed = initial_seed

        samples = None
        ks_pass = False
        ks_pvalue = None
        converged = False

        skewness_threshold = row['skewness_threshold']
        kurtosis_threshold = row['kurtosis_threshold']

        # Loop-invariant theoretical moments, hoisted out of the retry loop.
        theoretical_skew = PertDistribution.theoretical_skewness(row['a'], row['b'], row['c'])
        theoretical_kurt = PertDistribution.theoretical_kurtosis(row['a'], row['b'], row['c'])

        for attempt in range(max_attempts):
            # Removed redundant np.random.seed(): rvs is already seeded
            # explicitly via random_state, so the global seed had no effect.
            samples = PertDistribution.rvs(
                row['a'], row['b'], row['c'],
                size=unified_n, random_state=current_seed
            )

            current_metrics = self.check_convergence(samples, row)

            skew_diff = abs(current_metrics['skewness'] - theoretical_skew)
            kurt_diff = abs(current_metrics['kurtosis'] - theoretical_kurt)

            if skew_diff > skewness_threshold or kurt_diff > kurtosis_threshold:
                current_seed += 1
                continue

            ks_pass, ks_pvalue = self.ks_test(samples, {
                'a': row['a'], 'b': row['b'], 'c': row['c']
            })

            if unified_n >= 400:
                # Split-half stability: the two halves should agree on
                # mean and variance within 0.5% / 3%.
                first_half = samples[:unified_n // 2]
                second_half = samples[unified_n // 2:]

                mean_change = abs(np.mean(second_half) - np.mean(first_half)) / abs(np.mean(first_half))
                var_change = abs(np.var(second_half) - np.var(first_half)) / abs(np.var(first_half))

                # BUGFIX: recompute per attempt. Previously `converged` was
                # never reset, so one passing attempt left it True for all
                # later (possibly failing) attempts.
                converged = mean_change <= 0.005 and var_change <= 0.03
            else:
                # BUGFIX: for unified_n < 400 the split-half branch never ran,
                # so `converged` could never become True and every floor
                # exhausted all 100 attempts even when all checks passed.
                # A small-n draw that passed the moment checks is accepted.
                converged = True

            print(f"Floor: {floor}, Attempt: {attempt + 1}, Seed: {current_seed}, "
                  f"Skewness: {current_metrics['skewness']:.4f} (Theoretical: {theoretical_skew:.4f}), "
                  f"Kurtosis: {current_metrics['kurtosis']:.4f} (Theoretical: {theoretical_kurt:.4f}), "
                  f"Converged: {converged}, KS Pass: {ks_pass}, KS p-value: {ks_pvalue}")

            if converged and ks_pass:
                break

            current_seed += 1

        return samples, ks_pass, ks_pvalue, converged, current_seed

    def plot_skewness_kurtosis_comparison(self, samples, a, b, c, floor_name, output_path):
        """Save a two-panel bar chart comparing sample vs theoretical
        skewness and kurtosis for one floor's samples.

        `floor_name` is accepted for interface consistency with the other
        plotting helpers but is not rendered in this figure.
        """
        # Sample moments (Fisher/excess kurtosis, 0 for a normal distribution)
        # paired with the exact theoretical values.
        panels = (
            ('Skewness', stats.skew(samples),
             PertDistribution.theoretical_skewness(a, b, c)),
            ('Kurtosis', stats.kurtosis(samples, fisher=True),
             PertDistribution.theoretical_kurtosis(a, b, c)),
        )

        fig, axes = plt.subplots(1, 2, figsize=(12, 6))

        for ax, (label, sample_val, theo_val) in zip(axes, panels):
            ax.bar(['Sample', 'Theoretical'], [sample_val, theo_val], color=['blue', 'red'])
            ax.set_title(f'{label} Comparison')
            ax.set_ylabel(label)
            # Tight y-limits so small differences remain visible.
            ax.set_ylim(min(sample_val, theo_val) - 0.1, max(sample_val, theo_val) + 0.1)

        plt.tight_layout()
        plt.savefig(output_path, dpi=300)
        plt.close()

    def plot_skewness_kurtosis_trends(self, samples, a, b, c, floor_name, output_path):
        """Save line plots of skewness/kurtosis computed on growing sample
        prefixes, against the theoretical values as horizontal references.

        `floor_name` is accepted for interface consistency but not rendered.
        """
        theo_skew = PertDistribution.theoretical_skewness(a, b, c)
        theo_kurt = PertDistribution.theoretical_kurtosis(a, b, c)

        # Running moments on prefixes of size 100, 200, ... len(samples).
        sizes = range(100, len(samples) + 1, 100)
        skew_trend = [stats.skew(samples[:n]) for n in sizes]
        kurt_trend = [stats.kurtosis(samples[:n], fisher=True) for n in sizes]

        fig, axes = plt.subplots(1, 2, figsize=(12, 6))

        axes[0].plot(sizes, skew_trend, label='Sample Skewness')
        axes[0].axhline(y=theo_skew, color='r', linestyle='--', label='Theoretical Skewness')
        axes[0].set_title('Skewness Trend')
        axes[0].set_xlabel('Sample Size')
        axes[0].set_ylabel('Skewness')
        axes[0].legend()

        axes[1].plot(sizes, kurt_trend, label='Sample Kurtosis')
        axes[1].axhline(y=theo_kurt, color='r', linestyle='--', label='Theoretical Kurtosis')
        axes[1].set_title('Kurtosis Trend')
        axes[1].set_xlabel('Sample Size')
        axes[1].set_ylabel('Kurtosis')
        axes[1].legend()

        plt.tight_layout()
        plt.savefig(output_path, dpi=300)
        plt.close()

    def generate_plots(self, samples, floor_name, output_path):
        """Generate all verification figures for one floor's samples.

        Writes a 2x2 figure (histogram vs theoretical PDF, QQ plot, CDF
        comparison, box plot) to `output_path`, plus the skewness/kurtosis
        comparison and trend figures next to it (suffixes '_skew_kurt' and
        '_skew_kurt_trend').
        """
        # Hoist the per-floor config lookup: the original repeated the same
        # DataFrame filter ~20 times to fetch the same three scalars.
        floor_cfg = self.config[self.config['floor'] == floor_name]
        a = floor_cfg['a'].values[0]
        b = floor_cfg['b'].values[0]
        c = floor_cfg['c'].values[0]

        # PERT shape parameters of the underlying Beta distribution,
        # computed once and reused for the QQ plot and CDF comparison.
        alpha = 1 + 4 * (b - a) / (c - a)
        beta = 1 + 4 * (c - b) / (c - a)

        fig, axes = plt.subplots(2, 2, figsize=(12, 10))

        # Histogram with the theoretical PDF overlaid.
        axes[0, 0].hist(samples, bins=30, alpha=0.7, density=True, label='Sample Histogram')
        x = np.linspace(min(samples), max(samples), 1000)
        y = PertDistribution.pdf(x, a, b, c)
        axes[0, 0].plot(x, y, 'r-', label='Theoretical PDF')
        axes[0, 0].set_title(f'{floor_name} Distribution Histogram')
        axes[0, 0].legend()

        # QQ plot against the Beta distribution rescaled to [a, c].
        theoretical_dist = stats.beta(alpha, beta, loc=a, scale=c - a)
        stats.probplot(samples, dist=theoretical_dist, plot=axes[0, 1])
        axes[0, 1].set_title('QQ Plot')

        # Empirical CDF vs theoretical CDF.
        x = np.linspace(min(samples), max(samples), 100)
        theoretical_cdf = stats.beta.cdf(x, alpha, beta, loc=a, scale=c - a)

        sorted_samples = np.sort(samples)
        ecdf = np.arange(1, len(sorted_samples) + 1) / len(sorted_samples)

        axes[1, 0].plot(x, theoretical_cdf, 'r-', label='Theoretical CDF')
        axes[1, 0].step(sorted_samples, ecdf, where='post', label='Sample CDF')
        axes[1, 0].legend()
        axes[1, 0].set_title('CDF Comparison')

        # Box plot of the raw samples.
        axes[1, 1].boxplot(samples)
        axes[1, 1].set_title('Box Plot')

        plt.tight_layout()
        plt.savefig(output_path, dpi=300)
        plt.close()

        # Skewness/kurtosis bar comparison.
        skew_kurt_output_path = output_path.replace('.png', '_skew_kurt.png')
        self.plot_skewness_kurtosis_comparison(samples, a, b, c, floor_name, skew_kurt_output_path)

        # Skewness/kurtosis running-trend plot.
        skew_kurt_trend_output_path = output_path.replace('.png', '_skew_kurt_trend.png')
        self.plot_skewness_kurtosis_trends(samples, a, b, c, floor_name, skew_kurt_trend_output_path)

    def run(self):
        """Run the full pipeline.

        Stage 1 pre-samples every floor to find its required sample size;
        stage 2 resamples all floors at the largest ("unified") size, writes
        each floor's samples CSV and verification plots, and finally writes a
        summary CSV ('samples_verification.csv') next to the last floor's
        verification output.
        """
        pre_sampling_results = {}

        # Stage 1: per-floor pre-sampling.
        for _, row in self.config.iterrows():
            floor = row['floor']
            print(f"Pre-sampling {floor}...")
            n_i, samples, final_seed = self.pre_sampling(row)
            pre_sampling_results[floor] = {
                'n_i': n_i,
                'samples': samples,
                'final_seed': final_seed
            }

        # BUGFIX: an empty config previously crashed later — max() of an
        # empty sequence here, then an unbound verification_output_path at
        # the summary write.
        if not pre_sampling_results:
            print("No floors found in the configuration; nothing to sample.")
            return

        n_unified = max(v['n_i'] for v in pre_sampling_results.values())
        print(f"Unified sample size: {n_unified}")

        # Stage 2: formal sampling, outputs and summary rows per floor.
        for _, row in self.config.iterrows():
            floor = row['floor']
            print(f"Formal sampling {floor}...")

            if pre_sampling_results[floor]['n_i'] == n_unified:
                # This floor's pre-sampling already matched the unified size;
                # reuse its draw and only re-run the KS test.
                samples = pre_sampling_results[floor]['samples']
                ks_pass, ks_pvalue = self.ks_test(samples, {
                    'a': row['a'], 'b': row['b'], 'c': row['c']
                })
                converged = True
                final_seed = pre_sampling_results[floor]['final_seed']
            else:
                samples, ks_pass, ks_pvalue, converged, final_seed = self.formal_sampling(
                    row, n_unified, pre_sampling_results[floor]['final_seed'])

            samples_output_path = row['samples_output_path']
            verification_output_path = row['samples_verification_output_path']

            os.makedirs(os.path.dirname(samples_output_path), exist_ok=True)
            os.makedirs(os.path.dirname(verification_output_path), exist_ok=True)

            pd.DataFrame({'value': samples}).to_csv(samples_output_path, index=False)
            self.generate_plots(samples, floor, verification_output_path)

            final_metrics = self.check_convergence(samples, row)
            # PERT mean: mu = (a + 4b + c) / 6.
            theoretical_mean = (row['a'] + 4 * row['b'] + row['c']) / 6
            self.results.append({
                'floor': floor,
                'pre_sample_n': pre_sampling_results[floor]['n_i'],
                'final_sample_n': n_unified,
                'ks_pvalue': ks_pvalue,
                'final_mean': final_metrics['mean'],
                'theoretical_mean': theoretical_mean,
                'final_var': final_metrics['var'],
                # BUGFIX: the previous formula
                # (c-a)^2 * (a+4b+c) * (5a+8b+5c) / (36 * (a+4b+c)^2)
                # did not match the PERT/Beta variance (e.g. for a=0, b=0.5,
                # c=1 it gave 1/12 instead of 1/28). For PERT, alpha+beta=6,
                # so Var = (mu - a) * (c - mu) / 7.
                'theoretical_var': (theoretical_mean - row['a']) * (row['c'] - theoretical_mean) / 7,
                'final_median': final_metrics['median'],
                'final_skewness': final_metrics['skewness'],
                'theoretical_skewness': PertDistribution.theoretical_skewness(row['a'], row['b'], row['c']),
                'final_kurtosis': final_metrics['kurtosis'],
                'theoretical_kurtosis': PertDistribution.theoretical_kurtosis(row['a'], row['b'], row['c']),
                'used_seed': final_seed,
                'converged': 'Yes' if converged else 'No'
            })

        # Summary lands in the directory of the last floor's verification
        # output (preserved from the original behavior).
        summary_path = os.path.join(os.path.dirname(verification_output_path), 'samples_verification.csv')
        summary_df = pd.DataFrame(self.results)
        summary_df.to_csv(summary_path, index=False)
        print("\nSampling completed! Check the specified directories for results.")


if __name__ == "__main__":
    # Entry point: read the sampling parameter CSV and run the full pipeline.
    processor = SamplingProcessor('sampling_parameters2.csv')
    processor.run()