from multiprocessing import shared_memory
import time
import win32gui
import win32con

import torch.nn.functional as F
import torch
import numpy as np
import cv2
from PIL import Image
import torchvision

from model import Net
from dataset import train_loader
from grabscreen import grab_screen
from dataset import get_train_transforms
from config import x1, y1, x2, y2


def test():
    """Evaluate ``network`` on ``test_loader``; print average NLL loss and accuracy.

    Relies on module-level globals set up in ``__main__``:
    ``network``, ``device``, ``test_loader`` and ``test_losses``.
    Appends the average per-sample loss to ``test_losses`` as a side effect.
    """
    network.eval()
    test_loss = 0.0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            # Keep inputs on the same device as the model — the original
            # omitted this and would crash when network is on CUDA.
            data, target = data.to(device), target.to(device)
            output = network(data)
            # reduction='sum' replaces the deprecated size_average=False:
            # accumulate the total loss, divide by dataset size afterwards.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            # .item() converts the 0-dim tensor to a plain int so the printed
            # count/percentage are not rendered as "tensor(...)".
            correct += pred.eq(target.data.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


if __name__ == '__main__':

    network = Net()
    network.eval()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    network.load_state_dict(torch.load('model.pth', map_location=device))
    network = network.to(device)
    # NOTE(review): the "test" loader is the training loader — any accuracy
    # reported by test() is on training data; swap in a held-out set to verify.
    test_loader = train_loader
    test_losses = []

    with torch.no_grad():

        mode = 'screenshot'
        if mode == 'screenshot':
            fps = 0.0  # running average of frames/sec shown in the overlay

            view_cv = True   # show a live preview window (was a dead False/True pair)
            view_size = 200  # preview window side length, pixels

            if view_cv:
                cv2.namedWindow('pubg_pos', cv2.WINDOW_NORMAL)
                cv2.resizeWindow('pubg_pos', view_size, view_size)

            # Shared-memory channel publishing the detected posture class index.
            # If another process already created the list, attach to it instead.
            try:
                share_posture = shared_memory.ShareableList([1], name='share_posture')
            except FileExistsError:
                share_posture = shared_memory.ShareableList(name='share_posture')

            # Shared-memory flag: 1 while the model looks confident ("aiming").
            try:
                share_aim_mode = shared_memory.ShareableList([0], name='share_aim_mode')
            except FileExistsError:
                share_aim_mode = shared_memory.ShareableList(name='share_aim_mode')

            # Preprocessing mirroring the training pipeline: 28x28 image,
            # ImageNet mean/std normalization.
            pre_transforms = torchvision.transforms.Compose([
                torchvision.transforms.ToPILImage(),
                torchvision.transforms.Resize(size=(28, 28)),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize([0.485, 0.456, 0.406],
                                                 [0.229, 0.224, 0.225]),
            ])

            pos_cls = ['lie', 'squat', 'stand']  # class index -> posture label

            try:
                while True:

                    t1 = time.time()
                    time.sleep(0.01)  # throttle the capture loop

                    # Presumably a BGR numpy array of the (x1,y1)-(x2,y2)
                    # screen region — confirm against grabscreen.grab_screen.
                    img0 = grab_screen(region=(x1, y1, x2, y2))

                    if view_cv:
                        r_img0 = cv2.resize(img0, dsize=(view_size, view_size))

                    img0 = pre_transforms(img0)
                    img0 = img0.unsqueeze(0).to(device)

                    posture = network(img0)
                    share_posture[0] = int(posture.argmax())
                    # Heuristic: a peaked (confident) output has a high std-dev
                    # across classes; observed values ~>0.7 when a player is in
                    # frame vs ~0.14 when not, hence the 1.0 threshold.
                    if posture.std() > 1.:
                        share_aim_mode[0] = 1  # aiming
                    else:
                        share_aim_mode[0] = 0  # not aiming

                    if view_cv:

                        # Smooth the instantaneous rate; round() keeps the
                        # overlay to whole frames/sec.
                        fps = round((fps + (1. / (time.time() - t1))) / 2)
                        r_img0 = cv2.putText(r_img0, f"FPS {fps}", (0, 40),
                                             cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                        r_img0 = cv2.putText(r_img0, f"POS {pos_cls[posture.argmax()]}", (0, 80),
                                             cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                        r_img0 = cv2.putText(r_img0, f"Aim {share_aim_mode[0]}", (0, 120),
                                             cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

                        cv2.imshow('pubg_pos', r_img0)

                        # Keep the preview window above the (fullscreen) game.
                        hwnd = win32gui.FindWindow(None, 'pubg_pos')
                        win32gui.SetWindowPos(hwnd, win32con.HWND_TOPMOST, 0, 0, 0, 0,
                                              win32con.SWP_NOMOVE | win32con.SWP_NOSIZE)

                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            break
            finally:
                # Release OS resources even on 'q', Ctrl+C or an exception —
                # the original leaked the shared-memory handles and windows.
                cv2.destroyAllWindows()
                share_posture.shm.close()
                share_aim_mode.shm.close()