# import shutil
# import yaml
# from flask import Flask, request, jsonify, send_file
# import os
# from werkzeug.utils import secure_filename
# import zipfile
#
# # @app.route('/datasets/create', methods=['POST'])
# def create_dataset():
#     # 验证请求数据
#     if 'file' not in request.files:
#         return jsonify({'error': 'No file uploaded'}), 400
#
#     file = request.files['file']
#     if not file.filename.endswith('.zip'):
#         return jsonify({'error': 'Only ZIP files are allowed'}), 400
#
#     # 获取表单数据
#     dataset_name = request.form.get('name', 'untitled')
#     description = request.form.get('description', '')
#
#     # 创建数据集目录，此处 secure_filename(dataset_name)，不能只有中文，否则会报错
#     dataset_dir = os.path.join(app.config['DATASETS_UPLOAD_FOLDER'], secure_filename(dataset_name))
#     if os.path.exists(dataset_dir):
#         return jsonify({'error': 'Dataset already exists'}), 409
#     os.makedirs(dataset_dir)
#
#     try:
#         # 保存并解压ZIP文件
#         # secure_filename() prevents path traversal via a crafted upload filename
#         zip_path = os.path.join(dataset_dir, secure_filename(file.filename))
#         file.save(zip_path)
#
#         with zipfile.ZipFile(zip_path, 'r') as zip_ref:
#             # 自动处理嵌套目录
#             root_dir = None
#             for name in zip_ref.namelist():
#                 if '/' in name:
#                     root_dir = name.split('/')[0]
#                     break
#             temp_dir = os.path.join(dataset_dir, 'temp')
#             # NOTE(review): extractall on an untrusted archive is Zip-Slip-prone —
#             # validate member names (reject absolute paths / '..') before reactivating.
#             zip_ref.extractall(temp_dir)
#             actual_dir = os.path.join(temp_dir, root_dir) if root_dir else temp_dir
#             # 移动文件到目标目录
#             for item in os.listdir(actual_dir):
#                 src = os.path.join(actual_dir, item)
#                 dst = os.path.join(dataset_dir, item)
#                 if os.path.exists(dst):
#                     shutil.rmtree(dst)
#                 shutil.move(src, dst)
#             shutil.rmtree(temp_dir)
#
#         # ========== 增强的目录验证逻辑 ==========
#         required_dirs = [
#             'images/train',
#             'images/val',
#             'labels/train',
#             'labels/val'
#         ]
#
#         missing_dirs = []
#         for rel_path in required_dirs:
#             parts = rel_path.split('/')
#             current_dir = dataset_dir
#             valid = True
#             for part in parts:
#                 found = False
#                 for item in os.listdir(current_dir):
#                     if item.startswith(('.', '__')):
#                         continue  # 跳过隐藏文件
#                     item_path = os.path.join(current_dir, item)
#                     if os.path.isdir(item_path) and item.lower() == part:
#                         current_dir = item_path
#                         found = True
#                         break
#                 if not found:
#                     valid = False
#                     break
#             if not valid:
#                 missing_dirs.append(rel_path)
#
#         if missing_dirs:
#             raise ValueError(f'Missing required directories: {", ".join(missing_dirs)}')
#
#         # 2. 检查.yaml配置文件
#         yaml_files = [f for f in os.listdir(dataset_dir) if f.endswith(('.yaml', '.yml'))]
#         if not yaml_files:
#             raise ValueError('Missing .yaml configuration file')
#         if len(yaml_files) > 1:
#             raise ValueError('Multiple .yaml files found, expected only one')
#
#         # 3. 验证图像和标注文件对应关系
#         def validate_image_label_pair(img_dir, label_dir):
#             img_files = set([os.path.splitext(f)[0] for f in os.listdir(img_dir)
#                              if f.lower().endswith(('.png', '.jpg', '.jpeg'))])
#             label_files = set([os.path.splitext(f)[0] for f in os.listdir(label_dir)
#                                if f.endswith('.txt')])
#
#             # 检查是否有未匹配的文件
#             unmatched_images = img_files - label_files
#             unmatched_labels = label_files - img_files
#
#             errors = []
#             if unmatched_images:
#                 errors.append(f"{len(unmatched_images)} images without labels")
#             if unmatched_labels:
#                 errors.append(f"{len(unmatched_labels)} labels without images")
#
#             if errors:
#                 raise ValueError('Mismatch found: ' + ', '.join(errors))
#
#             return len(img_files)
#
#         # 验证训练集和验证集
#         try:
#             train_img_dir = os.path.join(dataset_dir, 'images/train')
#             train_label_dir = os.path.join(dataset_dir, 'labels/train')
#             train_count = validate_image_label_pair(train_img_dir, train_label_dir)
#
#             val_img_dir = os.path.join(dataset_dir, 'images/val')
#             val_label_dir = os.path.join(dataset_dir, 'labels/val')
#             val_count = validate_image_label_pair(val_img_dir, val_label_dir)
#         except ValueError as ve:
#             raise ValueError(f'Validation failed: {str(ve)}')
#
#         # 4. 验证.yaml文件内容
#         yaml_path = os.path.join(dataset_dir, yaml_files[0])
#         try:
#             with open(yaml_path, 'r') as f:
#                 yaml_content = yaml.safe_load(f)
#                 required_keys = ['train', 'val', 'nc', 'names']
#                 for key in required_keys:
#                     if key not in yaml_content:
#                         raise ValueError(f'Missing required key in YAML: {key}')
#         except yaml.YAMLError:
#             raise ValueError('Invalid YAML file format')
#
#         # ========== 保存到数据库 ==========
#         new_dataset = Dataset(
#             name=dataset_name,
#             description=description,
#             path=dataset_dir,
#             train_samples=train_count,
#             val_samples=val_count,
#             status='verified' if train_count > 0 and val_count > 0 else 'unverified',
#             yaml_path=yaml_path,
#             num_classes=yaml_content.get('nc', 0)
#         )
#         db.session.add(new_dataset)
#         db.session.commit()
#
#         return jsonify({
#             'code': 200,
#             'message': 'success',
#             'data': {
#                 'id': new_dataset.id,
#                 'name': new_dataset.name,
#                 'train_samples': train_count,
#                 'val_samples': val_count,
#                 'num_classes': new_dataset.num_classes,
#                 'yaml_path': new_dataset.yaml_path
#             }
#         })
#         # return jsonify({
#         #     'id': new_dataset.id,
#         #     'name': new_dataset.name,
#         #     'train_samples': train_count,
#         #     'val_samples': val_count,
#         #     'num_classes': new_dataset.num_classes,
#         #     'yaml_path': new_dataset.yaml_path
#         # }), 201
#
#     except Exception as e:
#         shutil.rmtree(dataset_dir, ignore_errors=True)
#         return jsonify({
#             'error': 'Dataset validation failed',
#             'details': str(e)
#         }), 400



# 数据集模型
# class Dataset(db.Model):
#     # must be lowercase __tablename__ — SQLAlchemy ignores __tableName__ silently
#     __tablename__ = 'dataset'
#     id = db.Column(db.Integer, primary_key=True)
#     name = db.Column(db.String(100), unique=True, nullable=False)
#     description = db.Column(db.Text)
#     path = db.Column(db.String(200), nullable=False)
#     train_samples = db.Column(db.Integer, default=0)# 训练样本数
#     val_samples = db.Column(db.Integer, default=0) # 验证样本数
#     num_classes = db.Column(db.Integer, default=0)  # 新增类别数
#     yaml_path = db.Column(db.String(200))          # 新增YAML路径
#     status = db.Column(db.String(20), default='unverified') # 状态：unverified/verified/invalid
#     # NOTE(review): local-time datetime.now here vs datetime.utcnow in Model —
#     # unify on one convention (preferably timezone-aware UTC) when reactivating.
#     created_time = db.Column(db.DateTime, default=datetime.now)
#
# # “模型”模型
# class Model(db.Model):
#     id = db.Column(db.Integer, primary_key=True)
#     name = db.Column(db.String(100), nullable=False)
#     description = db.Column(db.Text)
#     dataset_id = db.Column(db.Integer, db.ForeignKey('dataset.id'))
#     model_type = db.Column(db.String(50))
#     status = db.Column(db.String(20), default='pending')
#     created_time = db.Column(db.DateTime, default=datetime.utcnow)
#
#     def to_dict(self):
#         return {
#             'id': self.id,
#             'name': self.name,
#             'description': self.description,
#             'dataset_id': self.dataset_id,
#             # 'dataset': self.dataset.name,#需要根据id从数据集表中获取名称
#             'model_type': self.model_type,
#             'status': self.status,
#             'created_time': self.created_time.isoformat()
#         }
