import keras
# import tensorflow.python.keras as tf_keras
import torch

from Artear.data_gen import *
from Artear.modelling import *
from Artear.validation import validate


class Modeller:
    """Train audio/parameter models with either a Keras or a PyTorch backend.

    Batch generators come from ``Artear.data_gen`` (star-imported at module
    level).  Training can run locally, or batches can be shipped over a TCP
    socket to a remote peer running :meth:`as_server`.
    """

    def __init__(self, backend='pytorch', batch_size=16, **data_source):
        """Create a modeller.

        :param backend: ``'pytorch'`` or a Keras-style backend name.
        :param batch_size: batch size handed to the data generators.
        :param data_source: keyword map of data-source descriptors; every
            value must be a string (path / identifier).
        """
        # Fix: the original iterated ``data_source`` itself, i.e. the kwargs
        # *keys*, which are always str -- the check was vacuous.  The intent
        # is that the *values* are strings.
        assert all(isinstance(source, str) for source in data_source.values()), \
            'data_source values must be strings'

        self.model_type = ''          # set later by read_data()
        self.backend = backend
        self.data_source = data_source
        self.batch_size = batch_size
        self.loss = 0.                # last observed training loss
        self.metrics = {}             # e.g. {'acc': ...}
        self.data_generator, self.model = None, []
        self.config = {}              # config dict passed to read_data()

    def set_model(self, *model, optimizer=None, loss=None, metrics=None):
        """Register a user-supplied model.

        ``model`` is either a single ``keras.Model`` / ``torch.nn.Module``,
        or (symbolic workflow) a ``KerasTensor`` followed by the model that
        produced it.

        NOTE(review): ``optimizer``/``loss``/``metrics`` are accepted but
        currently unused -- TODO wire them into training.
        """
        # Fix: the original asserted ``isinstance(model[0], type(model[1]))``
        # which crashed with IndexError for a single model and compared two
        # unrelated objects otherwise.
        assert model, 'no model is given'
        from keras.engine.keras_tensor import KerasTensor
        if isinstance(model[0], KerasTensor):
            assert len(model) > 1 and model[1] is not None, \
                'a KerasTensor must be accompanied by its model'
            print('using tf.Tensor')
            # Fix: the original branch stored nothing, leaving self.model
            # empty.  Keep the (tensor, model) pair so self.model[1] -- as
            # used by the validate() calls in train() -- resolves.
            self.model = list(model)
        elif isinstance(model[0], keras.Model):
            self.model = [model[0]]
        elif isinstance(model[0], torch.nn.Module):
            self.model = [model[0]]
        else:
            # Fix: the original read ``assert True, ...`` which can never
            # fire; make the unsupported case an explicit error.
            raise TypeError('unsupported model type: %r' % type(model[0]))

    def read_data(self, **config):
        """Build ``self.data_generator`` for ``config['model_type']``.

        ``config`` is stored on the instance for later use by validation.
        """
        self.model_type = config['model_type']
        self.data_generator = None

        use_pickled = bool(self.data_source.get('pickled_data'))
        if self.model_type == 'audio2params':
            if use_pickled:
                self.data_generator = audio2params_data_gen_from_file(self.data_source, config, self.batch_size)
            else:
                self.data_generator = audio2params_data_gen(self.data_source, config, self.batch_size)
        elif self.model_type == 'audio2audio':
            # NOTE(review): reuses the audio2params generator -- confirm this
            # is intentional for the audio2audio pipeline.
            self.data_generator = audio2params_data_gen(self.data_source, config, self.batch_size)
        elif self.model_type == 'params2params':
            if use_pickled:
                self.data_generator = data_gen_from_file(self.data_source)
            # else: generator stays None; train() asserts on it later.

        self.config = config

    def build_model(self):
        """Build a model from one probe batch; shapes come from the data.

        Sets ``self.model``; returns ``None`` in every case.
        """
        data = next(self.data_generator)
        if self.model_type == 'audio2params':
            print(data['in'][0].shape, data['out'][0].shape)
            self.model = audio2params_modelling(data['in'][0].shape[1:], data['out'][0].shape[-1],
                                                backend=self.backend)
        elif self.model_type == 'audio2audio':
            self.model = audio2audio_modelling(data['in'][0].shape[1:], backend=self.backend)
        elif self.model_type == 'params2params':
            self.model = params2params_modelling(data['in'][0].shape[1:], backend=self.backend)
        else:
            return None

    def train(self, steps=10, show_summary=False, enable_validation=True, use_fit=True, validate_every=15, server=()):
        """Train the current model.

        :param steps: epochs (``use_fit=True``) or batch steps otherwise.
        :param show_summary: print the Keras summary after building.
        :param enable_validation: accepted for interface compatibility;
            currently unused (validation runs where hard-wired below).
        :param use_fit: use ``Model.fit`` instead of per-batch training
            (Keras path only).
        :param validate_every: validate every N steps in the per-batch loop.
        :param server: optional ``(host, port)``; when given, batches are
            shipped to a remote :meth:`as_server` peer for training.
        """
        if server:
            import socket
            import pickle
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((server[0], server[1]))
            print('Connected to server:', s.getsockname())

            def send_data_to_server(data):
                # protocol=2 keeps pickles readable by older peers.
                serialized_data = pickle.dumps(data, protocol=2)
                s.send(serialized_data)

            def get_data_from_server():
                # NOTE(review): a single recv() is not guaranteed to return a
                # complete pickle; large replies need length-prefixed framing.
                dat = s.recv(1024 * 1024)
                loaded_data = pickle.loads(dat)
                return loaded_data

        assert self.data_generator is not None, 'no data is given'
        data = next(self.data_generator)
        if not self.model:
            assert data and isinstance(data['in'][0], np.ndarray) and isinstance(data['out'][0], np.ndarray), \
                'data is None or wrong type of data is given'
            self.build_model()
            if isinstance(self.model[0], keras.Model):
                if show_summary:
                    self.model[0].summary()

        if isinstance(self.model[0], keras.Model):
            if use_fit:
                def gen_x():
                    # Endless x-only stream for self-supervised model types.
                    while True:
                        yield np.concatenate(next(self.data_generator)['in'])

                if self.model_type in ('audio2audio', 'params2params'):
                    self.model[0].fit(x=gen_x(), y=None, epochs=steps)
                else:
                    # Accumulate `steps` batches, then fit once on the lot.
                    data_in, data_out = [], []
                    for step in range(1, steps + 1):
                        data_ = next(self.data_generator)
                        data_in += data_['in']
                        data_out += data_['out']
                    x = np.concatenate(data_in)
                    y = np.concatenate(data_out)
                    del data_in, data_out  # free the per-batch lists before fitting
                    self.model[0].fit(x, y, epochs=steps)
                # NOTE(review): self.model[1] assumes the (tensor, model) pair
                # stored by set_model(); with a single stored model this
                # raises IndexError -- confirm the intended workflow.
                validate(self.model[1], self.data_generator, self.config)
            else:
                for step in range(1, steps + 1):
                    if self.model_type in ('audio2audio', 'params2params'):
                        if server:
                            send_data_to_server(next(self.data_generator))
                            result = get_data_from_server()
                        else:
                            data = next(self.data_generator)
                            result = self.model[0].train_on_batch(x=np.concatenate(data['in']), y=None, return_dict=True)
                        self.loss = result['loss']
                        print('loss:', self.loss)
                        if step % validate_every == 0:
                            # NOTE(review): self.model[1] -- see note above.
                            validate(self.model[1], self.data_generator, self.config)
                    else:
                        if server:
                            send_data_to_server(next(self.data_generator))
                            result = get_data_from_server()
                        else:
                            data = next(self.data_generator)
                            result = self.model[0].train_on_batch(np.concatenate(data['in']), np.concatenate(data['out']),
                                                                  return_dict=True)
                        self.loss, self.metrics['acc'] = result['loss'], result['acc']
                        print('loss:', self.loss, 'acc:', self.metrics['acc'])
                # cuda.select_device(0)
                # cuda.close()
                K.clear_session()
                gc.collect()

        elif isinstance(self.model[0], torch.nn.Module):
            if not server:
                torch.backends.cudnn.benchmark = True
                # NOTE(review): device is computed but neither the model nor
                # the tensors are moved to it -- training runs on CPU.
                device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
                model = self.model[0]
                self.model[0].train()
                # Fix: build loss/optimizer ONCE.  The original re-created the
                # Adam optimizer inside the step loop, which discarded its
                # running moment estimates on every single batch.
                criterion = torch.nn.CrossEntropyLoss()
                optimizer = torch.optim.Adam(model.parameters())
            else:
                send_data_to_server(self.config)

            if self.model_type in ('audio2audio', 'params2params'):
                pass  # PyTorch self-supervised training not implemented yet
            else:
                for step in range(1, steps + 1):
                    print('step:', step)
                    if server:
                        send_data_to_server(next(self.data_generator))
                        train_loss = get_data_from_server()
                    else:
                        data = next(self.data_generator)

                        in_data = torch.from_numpy(np.concatenate(data['in']))
                        out_data = torch.from_numpy(np.concatenate(data['out']))
                        del data  # release the numpy batch early
                        print(in_data.shape, out_data.shape)
                        output = model(in_data)
                        loss = criterion(output, out_data)
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()
                        train_loss = loss.item()

                    self.loss, self.metrics['acc'] = train_loss, None
                    print('loss:', self.loss, 'acc:', self.metrics['acc'])

        if server:
            s.close()

    def as_server(self, model_type='audio2params', addr='127.0.0.1', port=24017):
        """Run as a training server: receive config and batches from a
        client (see ``train(server=...)``), train locally, send losses back.

        ``model_type`` is accepted for interface compatibility, but the
        effective type comes from the config the client sends.
        """
        import socket
        import pickle

        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Fix: honour the addr/port arguments -- the original hard-coded
        # ('127.0.0.1', 24017) and silently ignored both parameters.
        server.bind((addr, port))
        server.listen(5)

        while True:
            print('Waiting for client to connect')
            # Renamed from ``addr`` to avoid shadowing the bind parameter.
            client, client_addr = server.accept()
            print('Connected to client: %s' % str(client_addr))

            def get_data_from_client():
                # SECURITY NOTE: pickle.loads on network data can execute
                # arbitrary code -- only run this on a trusted network.  A
                # single recv() may also truncate large pickles (no framing).
                dat = client.recv(1024 * 1024)
                loaded_data = pickle.loads(dat)
                return loaded_data

            if self.backend == 'pytorch':
                # NOTE(review): device is computed but unused (CPU training).
                device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

                if self.model_type in ('audio2audio', 'params2params'):
                    pass
                else:
                    self.config = get_data_from_client()
                    self.model_type = self.config['model_type']
                    self.build_model()
                    model = self.model[0]
                    self.model[0].train()
                    criterion = torch.nn.CrossEntropyLoss()
                    optimizer = torch.optim.Adam(model.parameters())
                    step = 1

                    # NOTE(review): loop has no exit; a client disconnect
                    # surfaces as an unpickling error.  TODO handle cleanly.
                    while True:
                        print('step:', step)
                        data = get_data_from_client()
                        in_data = torch.from_numpy(np.concatenate(data['in']))
                        out_data = torch.from_numpy(np.concatenate(data['out']))
                        del data
                        output = model(in_data)
                        loss = criterion(output, out_data)
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()
                        # Scaled by batch size to report an aggregate loss.
                        train_loss = loss.item() * in_data.size(0)

                        serialized_data = pickle.dumps(train_loss, protocol=2)
                        client.send(serialized_data)

                        self.loss, self.metrics['acc'] = train_loss, None
                        print('loss:', self.loss, 'acc:', self.metrics['acc'])
                        step += 1

            # clientsocket.close()
        

if __name__ == '__main__':
    # Stand up a training server on the default loopback endpoint.
    print('正在创建模型')
    host, listen_port = '127.0.0.1', 24017
    Modeller(backend='pytorch').as_server(addr=host, port=listen_port)