from flask import Flask, request, jsonify
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from PIL import Image
import io
import logging
from scipy.io import loadmat
import requests

# DEBUG level so the per-request shape/prediction logging below is visible.
logging.basicConfig(level=logging.DEBUG)

class HAL9000(nn.Module):
    """Four-stage convolutional classifier emitting 120 class logits.

    Each stage applies conv -> leaky ReLU -> 2x2 max-pool -> batch-norm.
    The head is a dropout-regularized two-layer MLP.  With a 3x512x512
    input the final feature map is 64x2x2, i.e. 256 values after
    flattening, which is what ``fc1`` expects.
    """

    def __init__(self):
        super().__init__()
        # NOTE: attribute names are part of the checkpoint's state_dict
        # keys (conv1.weight, bn1.running_mean, ...) — do not rename.
        self.conv1 = nn.Conv2d(3, 32, 3, stride=2)
        self.conv2 = nn.Conv2d(32, 64, 3, stride=2)
        self.conv3 = nn.Conv2d(64, 64, 3, stride=2)
        self.conv4 = nn.Conv2d(64, 64, 3)
        self.maxpool1 = nn.MaxPool2d(2, 2)
        self.maxpool2 = nn.MaxPool2d(2, 2)
        self.maxpool3 = nn.MaxPool2d(2, 2)
        self.maxpool4 = nn.MaxPool2d(2, 2)
        self.bn1 = nn.BatchNorm2d(32)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(64)
        self.bn4 = nn.BatchNorm2d(64)
        self.fc1 = nn.Linear(256, 128)
        self.fc2 = nn.Linear(128, 120)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        """Map a (N, 3, 512, 512) batch to (N, 120) logits."""
        stages = (
            (self.conv1, self.maxpool1, self.bn1),
            (self.conv2, self.maxpool2, self.bn2),
            (self.conv3, self.maxpool3, self.bn3),
            (self.conv4, self.maxpool4, self.bn4),
        )
        for conv, pool, norm in stages:
            x = norm(pool(F.leaky_relu(conv(x))))
        x = x.view(-1, 256)  # flatten the 64x2x2 feature map
        x = F.leaky_relu(self.dropout(self.fc1(x)))
        return self.fc2(x)


# Load the trained model checkpoint (CPU fallback when no GPU is present).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = HAL9000().to(device)
try:
    # The checkpoint is a dict whose "network" entry holds the state_dict;
    # weights_only=True prevents unpickling arbitrary objects from the file.
    model.load_state_dict(torch.load("HAL9000_best_val_89.32.pt", map_location=device, weights_only=True)["network"])
    model.eval()  # inference mode: disables dropout, uses running BN stats
    logging.info("Model loaded successfully")
except Exception as e:
    # Re-raise: the server is useless without a model, so fail fast at startup.
    logging.error(f"Failed to load model: {e}")
    raise
# Image preprocessing: force 3-channel RGB, resize, convert to tensor.
# BUG FIX: the previous tensor-level Lambda (`x[:3] if x.size(0) >= 3 else
# x.repeat(3, 1, 1)`) produced 6 channels for 2-channel LA images and kept
# wrong channels for CMYK; converting at the PIL level handles every mode
# (L, LA, P, RGBA, CMYK) correctly.
transform = transforms.Compose([
    transforms.Lambda(lambda img: img.convert("RGB")),
    transforms.Resize((512, 512)),
    transforms.ToTensor(),
])

app = Flask(__name__)

# Custom log filter to suppress browser favicon noise
class CustomLoggingFilter(logging.Filter):
    """Suppress log records about favicon.ico requests (browser noise)."""

    def filter(self, record):
        # Keep the record only if its rendered message never mentions favicon.ico.
        message = record.getMessage()
        return message.find("favicon.ico") < 0

# Attach the filter so favicon.ico requests don't clutter Flask's log.
app.logger.addFilter(CustomLoggingFilter())

def download_image(url):
    """Fetch an image from *url* and return it as a PIL Image, or None on failure.

    Fixes two robustness issues in the original:
    - ``requests.get`` had no timeout, so a stalled remote host could hang
      the worker indefinitely;
    - connection/timeout errors raised out of this helper instead of being
      reported as a download failure (they surfaced as a generic 500).
    """
    try:
        response = requests.get(url, timeout=10)
    except requests.RequestException as e:
        logging.error(f"Failed to fetch {url}: {e}")
        return None
    if response.status_code == 200:
        return Image.open(io.BytesIO(response.content))
    return None

def _load_class_names():
    """Return the sorted list of class names from file_list.mat (cached).

    Each ``file_list`` entry looks like ``"<class_dir>/<file>.jpg"``; the
    class name is the directory component.  The original handler re-read
    the .mat file from disk on every request — cache the result on the
    function object so no new imports are required.
    """
    cached = getattr(_load_class_names, "_cache", None)
    if cached is None:
        mat = loadmat('file_list.mat')
        file_list = [f[0][0] for f in mat['file_list']]
        cached = sorted({f.split('/')[0] for f in file_list})
        _load_class_names._cache = cached
    return cached


def _predict_image(image):
    """Run one PIL image through the model and return the result dict.

    Shared by both the URL and the file-upload branches of /predict,
    which previously duplicated this logic verbatim.
    """
    batch = transform(image).unsqueeze(0).to(device)
    logging.debug(f"Image shape: {batch.shape}")

    with torch.no_grad():
        output = model(batch)
        _, predicted = torch.max(output, 1)

    logging.debug(f"Model output shape: {output.shape}")
    logging.debug(f"Predicted index: {predicted.item()}")
    return {"prediction": _load_class_names()[predicted.item()]}


@app.route('/predict', methods=['POST'])
def predict():
    """Classify an image given as form field 'image_url' or uploaded file 'image'.

    Returns JSON ``{"prediction": <class name>}`` on success; 400 for
    missing/bad input, 500 if inference fails.
    """
    logging.debug("Received request")
    if 'image_url' in request.form:
        try:
            image = download_image(request.form['image_url'])
            if image is None:
                return "Failed to download image", 400
            return jsonify(_predict_image(image))
        except Exception as e:
            logging.error(f"Error during prediction: {e}")
            return f"Error during prediction: {e}", 500
    elif 'image' in request.files:
        file = request.files['image']
        if file.filename == '':
            logging.error("No selected file")
            return "No selected file", 400

        try:
            image = Image.open(io.BytesIO(file.read()))
            return jsonify(_predict_image(image))
        except Exception as e:
            logging.error(f"Error during prediction: {e}")
            return f"Error during prediction: {e}", 500
    else:
        logging.error("No image part in request")
        return "No image part", 400

if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger,
    # which allows arbitrary code execution in the browser — combined with
    # host='0.0.0.0' this must never run outside a trusted dev network.
    app.run(host='0.0.0.0', port=5000, debug=True)