#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by PyCharm.
# @Project: Code_Clone_Detection_Educoder
# @File: code_fingerprint.py
# @Description: 
# @Time: 2018/12/12 15:48
# @Author: ljz

import base64
import json
from hashlib import md5

from CCDetector.lexers import java_lexer, python_lexer
from CCDetector.utils import code_format, mysql_pool


def write_to_database(param, query_id, code_information, origin_code):
    """Insert per-line fingerprints and the raw source lines for one file.

    Args:
        param: dict with 'user_id', 'file_path' and 'passed_time'.
        query_id: identifier of the current detection run.
        code_information: list of [code, md5_hex, row_num] entries.
        origin_code: list of [line_text, row_num] entries.

    NOTE(review): values are still interpolated into the SQL text because the
    MysqlPool.insert API is not visible from here; if it supports placeholder
    binding, switch to parameterized queries instead of manual escaping.
    """
    def esc(value):
        # Escape backslashes BEFORE quotes: otherwise a trailing '\' in the
        # data would neutralize the quote escape and break out of the SQL
        # string literal. The original code escaped only single quotes, and
        # only for line text — user_id / file_path / passed_time went in raw.
        return str(value).replace('\\', '\\\\').replace("'", "\\'")

    mysql_operator = mysql_pool.MysqlPool()

    user_id = esc(param['user_id'])
    file_path = esc(param['file_path'])
    passed_time = esc(param['passed_time'])

    for code, code_md5, row_num in code_information:
        # code_md5 is hex and row_num is an int, so escaping them is a no-op,
        # but it keeps every interpolated field on the same safe path.
        mysql_operator.insert("""
        insert into educoder_code_clone.info 
        (query_id, user_id, file_path, code_md5, row_num, passed_time) values ('{}', '{}', '{}', '{}', '{}', '{}')
        """.format(query_id, user_id, file_path, esc(code_md5), row_num, passed_time))

    for line_text, row_num in origin_code:
        mysql_operator.insert("""
        insert into educoder_code_clone.origin_code 
        (query_id, user_id, file_path, line_code, row_num) values ('{}', '{}', '{}', '{}', '{}')
        """.format(query_id, user_id, file_path, esc(line_text), row_num))


def _hash_line(parts, row_num):
    """Build one [code, md5_hex, row_num] record for a completed line."""
    code = ''.join(parts)
    return [code, md5(code.encode('utf-8')).hexdigest(), row_num]


def start_lexer(param, content, query_id, language):
    """Tokenize *content*, fingerprint it line by line, and persist the result.

    Args:
        param: dict with 'user_id', 'file_path' and 'passed_time'
            (forwarded to write_to_database).
        content: decoded source text of one submitted file.
        query_id: identifier of the current detection run.
        language: 'java' or 'python'; any other value is a silent no-op.

    Returns:
        None. Side effect: rows inserted via write_to_database.
    """
    # Normalize curly quotes (copy/paste artifacts) to plain double quotes.
    content = content.replace('“', '"').replace('”', '"')
    # Normalize line endings before lexing: the previous per-character scan
    # treated '\r' and '\n' as separate line breaks, so a Windows '\r\n'
    # ending was counted as TWO rows, shifting every subsequent line number.
    content = content.replace('\r\n', '\n').replace('\r', '\n')

    if language == 'java':
        lexer = java_lexer.JavaLexer(content)
    elif language == 'python':
        lexer = python_lexer.PythonLexer(content)
    else:
        return
    tokens = lexer.analyze()

    # Pass 1: join tokens per line and fingerprint each line with md5.
    code_information = []
    line = []
    rows = 0
    for token in tokens:
        # '\r' kept in the check defensively, in case the lexer synthesizes it.
        if token in ('\n', '\r'):
            rows += 1
            code_information.append(_hash_line(line, rows))
            line = []
        else:
            line.append(token)
    if line:  # final line had no trailing newline
        rows += 1
        code_information.append(_hash_line(line, rows))

    if language == 'java':
        code_information = code_format.handle_java_annotation(code_information)
    elif language == 'python':
        code_information = code_format.handle_python_annotation(code_information)

    # Pass 2: keep the raw source split into numbered lines (newline retained,
    # matching what the database schema stored before).
    origin_code = []
    buf = []
    rows = 0
    for ch in content:
        buf.append(ch)
        if ch == '\n':
            rows += 1
            origin_code.append([''.join(buf), rows])
            buf = []
    if buf:  # final line had no trailing newline
        rows += 1
        origin_code.append([''.join(buf), rows])

    write_to_database(param, query_id, code_information, origin_code)


def decode_base64(data):
    """Decode a URL-safe base64 string, restoring any stripped '=' padding.

    Args:
        data: URL-safe base64 text, possibly with its trailing '=' removed.

    Returns:
        The decoded bytes interpreted as UTF-8 text.
    """
    # -len % 4 is 0 when the length is already a multiple of 4; the previous
    # `4 - len % 4` yielded 4 in that case and appended a useless '====' block
    # (tolerated only because urlsafe_b64decode is lenient about it).
    missing_padding = -len(data) % 4
    if missing_padding:
        data += '=' * missing_padding
    return base64.urlsafe_b64decode(data).decode(encoding='utf-8')


def parse_json(language, data, query_id):
    """Parse the submitted JSON payload and lex every user's code files.

    Args:
        language: source language forwarded to start_lexer ('java'/'python').
        data: JSON text — a list of users, each with 'user_id' and a
            'code_info' list of {'path', 'content', 'passed_time'} entries.
        query_id: identifier of the current detection run.

    Returns:
        A list of [user_id, path, path, ...] entries, one per user with a
        non-null user_id, listing the file paths that were processed.
    """
    detect_sequence = []
    for entry in json.loads(data):
        uid = entry['user_id']
        if uid is None:
            continue
        record = [uid]
        for info in entry['code_info']:
            path = info['path']
            content = info['content']
            # Skip incomplete records rather than failing the whole batch.
            if path is None or content is None:
                continue
            record.append(path)
            start_lexer(
                {'user_id': uid,
                 'file_path': path,
                 'passed_time': info['passed_time']},
                decode_base64(content),
                query_id,
                language,
            )
        detect_sequence.append(record)
    return detect_sequence
