import torch,regex,numpy as np
from fastai.vision.all import *
import regex
import torch.nn.functional as F
from sklearn.model_selection import StratifiedKFold
from tqdm.notebook import tqdm

# Unit tables used to normalize quantities parsed out of product titles.
# Each category maps to (unit_name, multiplier) pairs; the multiplier
# converts a value expressed in that unit into the category's base unit
# (mg for weight, mm for length, ml for volume, ...), so products listed
# as "1 kg" and "1000 g" compare equal.
measurements = {
    'weight': [('mg', 1), ('g', 1000), ('gr', 1000), ('gram', 1000), ('kg', 1000000)],
    'length': [('mm', 1), ('cm', 10), ('m', 1000), ('meter', 1000)],
    'pieces': [('pc', 1)],
    'memory': [('gb', 1)],
    'volume': [('ml', 1), ('l', 1000), ('liter', 1000)]
}


def to_num(x, mult=1):
    """Parse a numeric string *x* (comma or dot as decimal separator),
    scale it by *mult*, and return the result truncated to an int."""
    normalized = x.replace(',', '.')
    return int(float(normalized) * mult)


def extract_unit(tit, m):
    """Return the set of number strings in *tit* that are immediately
    followed by unit *m* (optionally pluralized with a trailing 's').

    Matches are found with overlapping search so occurrences separated
    by a single non-word character are all captured.
    """
    # Raw string: in a plain literal, '\W'/'\d' are invalid escape
    # sequences (DeprecationWarning, and SyntaxWarning from Python 3.12).
    # The commas/dots inside a character class need no escaping.
    pat = rf'\W(\d+(?:[,.]\d+)?) ?{m}s?\W'
    matches = regex.findall(pat, tit, overlapped=True)
    return set(matches)


def extract(tit):
    """Extract every measurement mentioned in a product title.

    Returns a dict mapping category name ('weight', 'length', ...) to a
    set of values normalized to that category's base unit; categories
    with no matches are omitted entirely.
    """
    # Pad with spaces so the \W anchors in extract_unit can match at
    # the very start/end of the title.
    padded = ' ' + tit.lower() + ' '
    found = {}
    for cat, units in measurements.items():
        values = set()
        for unit_name, mult in units:
            values |= {to_num(v, mult) for v in extract_unit(padded, unit_name)}
        if values:
            found[cat] = values
    return found


def add_measurements(data):
    """Attach a 'measurement' column (dict of category -> value set,
    parsed from each row's title) to *data* in place and return it."""
    data['measurement'] = data['title'].map(extract)
    return data


def match_measures(m1, m2):
    """Return True when two measurement dicts are compatible.

    Compatible means: they share no measurement categories at all, or
    at least one shared category has a value in common. Two products
    that both declare a weight, but with no weight in common, conflict.
    """
    shared = set(m1.keys()) & set(m2.keys())
    if not shared:
        return True
    for cat in shared:
        if not m1[cat].isdisjoint(m2[cat]):
            return True
    return False


def check_measurements(combined_dists, combined_inds, data_df):
    """Zero out the strongest candidate matches whose extracted
    measurements conflict. Mutates *combined_dists* in place.

    Scans the top K similarity scores across the whole matrix
    (K = min(8, n) * n) and, for each (row, neighbor-slot) pair whose
    two products have incompatible measurement dicts, sets the score
    to 0 so the pair can no longer be selected as a match.
    """
    n_cols = combined_inds.shape[1]
    top_k = min(8, len(data_df)) * len(data_df)
    _, flat_inds = combined_dists.view(-1).topk(top_k)
    dropped = 0
    for flat in flat_inds.tolist():
        # recover (row, column) from the flattened index
        row, col = divmod(flat, n_cols)
        neighbor = combined_inds[row, col]
        if not match_measures(data_df.iloc[row].measurement,
                              data_df.iloc[neighbor.item()].measurement):
            dropped += 1
            combined_dists[row][col] = 0
    print('removed', dropped, 'matches')


def add_target_groups(data_df, source_column='label_group', target_column='target'):
    """Add a column holding, for each row, the array of positional
    indices of all rows that share its label (the retrieval targets).
    Mutates *data_df* in place and returns it."""
    label_to_rows = data_df.groupby(source_column).indices
    data_df[target_column] = [label_to_rows[label] for label in data_df[source_column]]
    return data_df


def add_splits(train_df, valid_group=0):
    """Assign every label_group to one of 5 folds, stratified by group
    size so each fold sees a similar size distribution, then flag rows
    whose fold equals *valid_group* as validation. Mutates *train_df*
    in place and returns it."""
    group_sizes = train_df.groupby('label_group').size()
    labels = group_sizes.index.to_list()
    sizes = group_sizes.to_list()

    folds = StratifiedKFold(5)
    label_to_fold = dict()
    # each label appears in exactly one fold's validation indices
    for fold_idx, (_, valid_idx) in enumerate(folds.split(labels, sizes)):
        for label in np.array(labels)[valid_idx]:
            label_to_fold[label] = fold_idx

    train_df['split'] = train_df.label_group.replace(label_to_fold)
    train_df['is_valid'] = train_df['split'] == valid_group
    return train_df


def embs_from_model(model, dl):
    """Run *model* over every batch of *dl* and return L2-normalized
    half-precision embeddings plus the concatenated labels.

    Batches may be (x, y) pairs or bare (x,) tuples; for unlabeled
    batches a one-element zero tensor is recorded in place of labels.
    """
    emb_chunks, label_chunks = [], []
    for batch in tqdm(dl):
        if len(batch) == 2:
            xb, yb = batch
        else:
            (xb,) = batch
            yb = torch.zeros(1)
        with torch.no_grad():
            emb_chunks.append(model(xb).half())
        label_chunks.append(yb)
    return F.normalize(torch.cat(emb_chunks)), torch.cat(label_chunks)


def get_targets_shape(train_df):
    """Return the distribution of target-group sizes.

    Produces a list of (size, fraction_of_rows) tuples covering every
    integer size from the smallest to the largest observed group; sizes
    that never occur get fraction 0.0.
    """
    from collections import Counter  # local import keeps file-level imports untouched

    all_targets = add_target_groups(train_df).target.to_list()
    lens = [len(t) for t in all_targets]
    # Count each size once (O(n)); the previous list.count() per size
    # re-scanned the whole list for every size (O(n*m)).
    len_counts = Counter(lens)
    total = len(all_targets)
    return [(size, len_counts[size] / total)
            for size in range(min(lens), max(lens) + 1)]


def chisel(groups, groups_p, pos, target_count):
    """Trim low-confidence groups, in place, until at least
    *target_count* groups have exactly *pos* elements.

    Candidates are groups whose probability list extends past *pos*;
    those with the lowest probability at position *pos* are truncated
    to length *pos* first. No-op when the target is already met; stops
    early when candidates run out.
    """
    already_at_pos = sum(1 for g in groups if len(g) == pos)
    if already_at_pos >= target_count:
        return
    needed = target_count - already_at_pos
    # (group index, probability of keeping the element at `pos`)
    candidates = [(i, p[pos]) for i, p in enumerate(groups_p) if len(p) > pos]
    candidates.sort(key=lambda item: item[1])
    for i, _ in candidates[:needed]:
        groups[i] = groups[i][:pos]
        groups_p[i] = groups_p[i][:pos]


def sorted_pairs(distances, indices):
    """Flatten per-row neighbor tensors into (row, neighbor, distance)
    triplets, sorted by distance descending.

    Within each row, a neighbor index that appears more than once is
    kept only at its first occurrence.
    """
    pairs = []
    for row in range(len(distances)):
        seen = set()
        for neighbor, dist in zip(indices[row].tolist(), distances[row].tolist()):
            if neighbor not in seen:
                seen.add(neighbor)
                pairs.append((row, neighbor, dist))
    pairs.sort(key=lambda triplet: triplet[2], reverse=True)
    return pairs


def do_chunk(embs, step=1000):
    """Yield successive row-slices of *embs*, each at most *step* rows.

    *step* defaults to 1000, the previously hard-coded chunk size, so
    existing callers behave identically.
    """
    n = embs.shape[0]
    for chunk_start in range(0, n, step):
        yield embs[chunk_start:min(chunk_start + step, n)]


def get_nearest(embs, emb_chunks, K=None, sorted=True):
    """For every embedding in each chunk, find its K most similar rows
    of *embs* by dot product (cosine similarity for normalized rows).

    Returns (distances, indices), each of shape (total_chunk_rows, K).
    K defaults to min(51, len(embs)). NOTE: the `sorted` parameter
    shadows the builtin but is kept for interface compatibility; it is
    forwarded to torch.topk.
    """
    if K is None:
        K = min(51, len(embs))
    dist_parts, ind_parts = [], []
    for chunk in emb_chunks:
        similarity = embs @ chunk.T
        vals, inds = similarity.topk(K, dim=0, sorted=sorted)
        dist_parts.append(vals.T)
        ind_parts.append(inds.T)
    return torch.cat(dist_parts), torch.cat(ind_parts)


def combined_distances(embs_list):
    """Blend nearest-neighbor similarities from several embedding spaces.

    For each row, the candidate set is the union of every space's
    top-K neighbor indices; the per-space similarities to those
    candidates are then combined with an iterated probabilistic OR
    (a + b - a*b). For exactly two spaces this reproduces the original
    Ds[0] + Ds[1] - Ds[0]*Ds[1] formula; the loop additionally handles
    a single space or more than two (the old code crashed on one set
    and silently ignored any beyond the second).

    Returns (indices, distances), each of shape (n, K) with
    K = min(n, 51).
    """
    K = min(len(embs_list[0]), 51)
    combined_inds = torch.cat(
        [get_nearest(embs, do_chunk(embs))[1] for embs in embs_list], dim=1)
    res_inds, res_dists = [], []
    for x in range(len(combined_inds)):
        inds = combined_inds[x].unique()
        sims = [embs[None, x] @ embs[inds].T for embs in embs_list]
        # probabilistic OR across all spaces
        D = sims[0]
        for s in sims[1:]:
            D = D + s - D * s
        top_dists, top_inds = D.topk(K)
        res_inds.append(inds[top_inds])
        res_dists.append(top_dists)
    return torch.cat(res_inds), torch.cat(res_dists)


def blend_embs(embs_list, data_df, threshold=.97, m2_threshold=.6):
    """Smooth each embedding by blending it with its high-confidence
    neighbors, weighted by combined similarity, then re-normalize.

    For each row x, neighbors whose combined distance exceeds
    *threshold* are summed (similarity-weighted). When only one entry
    clears the threshold — presumably x itself at slot 0, since the
    distances come from a topk; TODO confirm — the entry at slot 1 is
    force-included if it exceeds *m2_threshold*. Measurement-
    incompatible pairs are zeroed out first via check_measurements.

    Returns a fastai `L` of re-normalized tensors, one per input
    embedding set.
    """
    combined_inds, combined_dists = combined_distances(embs_list)
    # zero out similarities between products with conflicting measurements
    check_measurements(combined_dists, combined_inds, data_df)
    new_embs_list = L((torch.empty_like(embs) for embs in embs_list))
    for x in range(len(embs_list[0])):
        neighs = combined_dists[x] > threshold
        # lone match above threshold: optionally pull in the runner-up
        # when it is still reasonably similar
        if neighs.sum() == 1 and combined_dists[x][1] > m2_threshold:
            neighs[1] = 1
        neigh_inds, neigh_ratios = combined_inds[x, neighs], combined_dists[x, neighs]
        # similarity-weighted sum of neighbor embeddings, per space
        for embs, new_embs in zip(embs_list, new_embs_list):
            new_embs[x] = (embs[neigh_inds] * neigh_ratios.view(-1, 1)).sum(dim=0)
    return new_embs_list.map(F.normalize)