# import argparse
# import numpy as np

# def analyze_columns_accumulated(data):
#     # 计算所有累积行中每列的平均值、方差和中位数
#     columns = np.array(data, dtype=float)
#     avg = np.mean(columns, axis=0)   # 计算每一列的平均值
#     var = np.var(columns, axis=0)    # 计算每一列的方差
#     median = np.median(columns, axis=0)  # 计算每一列的中位数
#     return avg, var, median

# def process_tsv(file_path, start_keyword, end_keyword):
#     accumulated_data = []  # 用于保存分析区域内的所有数据行
    
#     with open(file_path, 'r') as file:
#         analyzing = False
#         for line in file:
#             line = line.strip()  # 去除首尾的空格和换行符
#             if not line:  # 如果是空行则跳过
#                 continue
            
#             if start_keyword in line:
#                 analyzing = True  # 开始分析
#                 print(f"Start analyzing from: {line}")
#                 continue
            
#             if end_keyword in line:
#                 print(f"End analyzing at: {line}")
#                 break  # 结束分析
            
#             if analyzing:
#                 # 解析行，跳过第一列
#                 line_data = line.split('\t')[1:-1]  # 从第二列开始，取到倒数第二列
#                 accumulated_data.append(line_data)  # 将每行数据累积
    
#     # 如果有数据，则计算统计量
#     if accumulated_data:
#         avg, var, median = analyze_columns_accumulated(accumulated_data)
#         print(f"Overall Analysis:")
#         print(f"Average: {avg}")
#         print(f"Variance: {var}")
#         print(f"Median: {median}")
#     else:
#         print("No data found between the specified keywords.")

# def main():
#     parser = argparse.ArgumentParser(description='Analyze specific columns in a TSV file.')
#     parser.add_argument('file', help='Path to the TSV file.')
#     parser.add_argument('--start', required=True, help='Start keyword to begin analysis.')
#     parser.add_argument('--end', required=True, help='End keyword to stop analysis.')
    
#     args = parser.parse_args()
    
#     process_tsv(args.file, args.start, args.end)

# if __name__ == '__main__':
#     main()


import argparse
import numpy as np

def analyze_columns_accumulated(data):
    """Compute per-column statistics over all accumulated rows.

    Args:
        data: A sequence of rows, each a sequence of numeric (or
            numeric-string) values; all rows must have the same length.

    Returns:
        A tuple ``(avg, var, median)`` of 1-D NumPy arrays holding the
        mean, (population) variance, and median of each column.
    """
    matrix = np.asarray(data, dtype=float)
    return (
        np.mean(matrix, axis=0),
        np.var(matrix, axis=0),
        np.median(matrix, axis=0),
    )

def process_tsv(file_path, start_keyword):
    """Scan a TSV file and print per-column statistics for its data rows.

    Rows are collected starting from the first line *after* a line
    containing ``start_keyword``. If ``start_keyword`` is None (the CLI
    makes ``--start`` optional), every data row is collected from the
    beginning of the file.

    For each collected row the first and last tab-separated columns are
    dropped; the remaining values are accumulated and their mean,
    variance, and median are printed via analyze_columns_accumulated().

    Args:
        file_path: Path to the TSV file to read.
        start_keyword: Substring marking the start of the analysis
            region, or None to analyze the whole file.
    """
    accumulated_data = []  # all data rows found inside the analysis region

    with open(file_path, 'r') as file:
        # Without a start keyword, begin accumulating immediately;
        # otherwise wait until the keyword line is seen.
        analyzing = start_keyword is None
        for line in file:
            if not line.endswith('\n'):
                # A final line with no newline may still be being written
                # (partially generated); stop reading at that point.
                print(f"Incomplete line found; stopping read: {line}")
                break
            line = line.strip()  # drop surrounding whitespace/newline
            if not line:  # skip blank lines
                continue

            if start_keyword is not None and start_keyword in line:
                analyzing = True  # start keyword found — begin accumulating
                print(f"Start analyzing from: {line}")
                continue

            if analyzing:
                # Parse the row, dropping the first and last columns.
                line_data = line.split('\t')[1:-1]
                accumulated_data.append(line_data)

    # Compute and print statistics only if any rows were collected.
    if accumulated_data:
        avg, var, median = analyze_columns_accumulated(accumulated_data)
        print("Overall Analysis:")
        print(f"Average: {avg}")
        print(f"Variance: {var}")
        print(f"Median: {median}")
    else:
        print("No data found after the specified keyword.")
    
    # 如果有数据，则计算统计量
    if accumulated_data:
        avg, var, median = analyze_columns_accumulated(accumulated_data)
        print(f"Overall Analysis:")
        print(f"Average: {avg}")
        print(f"Variance: {var}")
        print(f"Median: {median}")
    else:
        print("No data found after the specified keyword.")

def main():
    """Entry point: parse CLI arguments and run the TSV analysis."""
    arg_parser = argparse.ArgumentParser(
        description='Analyze specific columns in a TSV file.')
    arg_parser.add_argument('file', help='Path to the TSV file.')
    # --start is optional: when omitted, process_tsv receives None.
    arg_parser.add_argument('--start', required=False,
                            help='Start keyword to begin analysis.')

    parsed = arg_parser.parse_args()
    process_tsv(parsed.file, parsed.start)

# Run the CLI only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
