# -*- encoding=utf-8 -*-

import re
import os
import sys
import time
import datetime
import yaml
import json
import requests
from os.path import exists
from collections import defaultdict
from elasticsearch import Elasticsearch

import warnings
warnings.filterwarnings("ignore")

# ------------------------------------------------------------------------
# constants
# get constants from compass-ci config
# ------------------------------------------------------------------------
# Root of the lkp-tests checkout; falls back to the container default
# when the LKP_SRC environment variable is unset or empty.
env_lkp_src = os.environ.get('LKP_SRC')
LKP_SRC = env_lkp_src or '/c/lkp-tests'

def load_config_file(conf_file):
    """Load a JSON or YAML configuration file and return the parsed content.

    Exits the whole program with a non-zero status when the file is
    missing, since every later step depends on the configuration.
    """
    file_path = os.path.realpath(conf_file)
    if not os.path.exists(file_path):
        # Fixed message (was "does exists!") and exit code (was 0):
        # a missing config file is an error, not a success.
        print(f"{conf_file} does not exist!")
        sys.exit(1)

    # Open the resolved path (the original opened conf_file after
    # checking file_path; realpath keeps symlinked configs working).
    with open(file_path, 'r', encoding='utf-8') as f:
        if conf_file.endswith('.json'):
            return json.load(f)
        # FullLoader avoids arbitrary object construction, the safe
        # default for local-but-not-fully-trusted YAML.
        return yaml.load(f.read(), yaml.FullLoader)

def get_all_cci_config():
    """Collect every *.yaml config file path from the compass-ci config
    directories (system-wide plus the per-user defaults).

    Robustness fix: directories that do not exist are skipped instead
    of raising FileNotFoundError from os.listdir, so the script also
    runs on hosts with a partial compass-ci setup.
    """
    files = []
    dirs = ['/etc/compass-ci/',
            '/etc/compass-ci/defaults/',
            '/etc/compass-ci/service/',
            '/etc/compass-ci/accounts/',
            '/etc/compass-ci/register/',
            f"{os.environ.get('HOME')}/.config/compass-ci/defaults/"
    ]
    for dir_path in dirs:
        if not os.path.isdir(dir_path):
            continue
        for f in os.listdir(dir_path):
            if f.endswith('.yaml'):
                files.append(dir_path + f)
    return files
   
def get_rpm_name(rpm):
    """Strip the "-version-release..." tail from an rpm file name.

    Returns the input unchanged when it does not contain at least two
    '-'-separated fields after the package name.
    """
    matched = re.match(r"^(.+)-.+-.+", rpm)
    return matched.group(1) if matched else rpm

def cci_defaults():
    """Merge every compass-ci YAML config file into one flat dict.

    Later files win on duplicate keys because dict.update is applied
    in directory-listing order.
    """
    merged = {}
    for path in get_all_cci_config():
        merged.update(load_config_file(path))
    return merged
     
def load_category():
    """Build a mapping of rpm package name -> category level.

    Reads category.json next to this script; each entry is expected
    to provide a "bin" rpm file name and a "level" value.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    categories = load_config_file(os.path.realpath(f"{current_dir}/category.json"))

    rpm_category = {}
    for entry in categories:
        name = get_rpm_name(entry.get("bin"))
        if name:
            rpm_category[name] = entry.get("level")
    return rpm_category

# Lookup tables and configuration, resolved once at import time.
CATEGORY = load_category()      # rpm name -> category level (from category.json)
cci_config = cci_defaults()     # merged compass-ci YAML configuration
# Lab-local Elasticsearch connection settings (jobs index lives here).
ES_HOST = cci_config.get('ES_HOST')
ES_PORT = cci_config.get('ES_PORT')
ES_USER = cci_config.get('ES_USER')
ES_PASSWORD = cci_config.get('ES_PASSWORD')

# Cloud Elasticsearch connection settings (reports are published there).
CLOUD_ES_HOST = cci_config.get('CLOUD_ES_HOST')
CLOUD_ES_PORT = cci_config.get('CLOUD_ES_PORT')
CLOUD_ES_USER = cci_config.get('CLOUD_ES_USER')
CLOUD_ES_PASSWORD = cci_config.get('CLOUD_ES_PASSWORD')

# Reproducible diff service endpoint; 172.17.0.1 is presumably the
# docker bridge gateway on the host — TODO confirm.
REPRODUCIBLE_HOST = cci_config.get('REPRODUCIBLE_HOST') if cci_config.get('REPRODUCIBLE_HOST') else '172.17.0.1'
REPRODUCIBLE_PORT = cci_config.get('REPRODUCIBLE_PORT') if cci_config.get('REPRODUCIBLE_PORT') else 10006

# Map uname-style arch names to the names stored in report documents.
ARCH = {
    "aarch64": "arm64",
    "x86_64": "amd64"
}

# ------------------------------------------------------------------------
# ES client
# for query and index doc
# ------------------------------------------------------------------------

# Lab-local ES client: used for querying job documents and for
# indexing the per-rpm test results.
ES_CLIENT = Elasticsearch(
      [f"http://{ES_HOST}:{ES_PORT}/"],
      http_auth=(ES_USER, ES_PASSWORD)
      )
# Cloud ES client: receives the published repo/rpm reproducibility reports.
CLOUD_ES_CLIENT = Elasticsearch(
      [f"http://{CLOUD_ES_HOST}:{CLOUD_ES_PORT}/"],
      #verify_certs = False,
      http_auth=(CLOUD_ES_USER, CLOUD_ES_PASSWORD)
      )

# ------------------------------------------------------------------------
# reproducible tests 
# step-1: batch submit rpmbuild tests twice for each repo
# step-2: monitor all jobs and wait jobs finish
# step-3: call api: /web_backend/diff_2rpms to create a diffoscope report in job2['result_root']
# step-4: extract test info from job1,job2 of each repo reproducible-test
# ------------------------------------------------------------------------

# STEP-1: batch submit rpmbuild tests twice for each repo
def submit_jobs(matrix_file):
    """Submit every job in matrix_file twice and return the job ids.

    Two identical submissions are made so the two resulting builds of
    each repo can later be compared for reproducibility. Job ids are
    scraped from the "got job id=..." lines of batch-submit's output.
    """
    today = str(datetime.datetime.now()).split(' ')[0]
    group_id = f"reproducible-test-{today}"
    file_path = os.path.realpath(matrix_file)
    cmd = (f"{LKP_SRC}/sbin/batch-submit {file_path} group_id={group_id} docker_image=openeuler:20.03-LTS-SP1"
           f" && {LKP_SRC}/sbin/batch-submit {file_path} group_id={group_id} docker_image=openeuler:20.03-LTS-SP1")

    ids = []
    stream = os.popen(cmd)
    for raw_line in stream.readlines():
        print(raw_line)
        stripped = raw_line.strip()
        if re.search('got job id', stripped):
            ids.append(stripped.split('=')[1])
    return ids

# STEP-2: monitor all jobs and wait jobs finish
# monitor data:
# {
#    'm/minicom/minicom': {'finish': {'crystal.5244778': 1, 'crystal.5244780': 1}},
#    'p/pigz/pigz': {'finish': {'crystal.5244777': 1, 'crystal.5244779': 1}}
# }
# ES aggregation body: bucket matching jobs by upstream_repo, then by
# job_stage, then by job id (bucket sizes capped at 10000 each).
AGGS = {
    "all_upstream_repo": {
        "terms": {"field": "upstream_repo", "size": 10000},
        "aggs": {
            "all_job_stage": {
                "terms": {"field": "job_stage", "size": 10000},
                "aggs": {
                    "all_job_id": {
                        "terms": {"field": "id", "size": 10000},
                    },
                },
            },
        },
    },
}

def extract_aggs(data):
    """Flatten an ES aggregations response into plain nested dicts.

    Only members whose name starts with 'all' (our aggregation naming
    convention) are walked. Bucket entries carrying a nested
    aggregation (more members than 'key' and 'doc_count') recurse;
    leaf buckets map key -> doc_count.
    """
    flattened = {}
    for name, value in data.items():
        if not name.startswith('all'):
            continue
        for bucket in value['buckets']:
            if len(bucket) > 2:
                flattened.setdefault(bucket['key'], {})
                flattened[bucket['key']] = extract_aggs(bucket)
            else:
                flattened[bucket['key']] = bucket['doc_count']
    return flattened

def get_monitor_data(ids):
    """Query the jobs index for the given job ids and return the
    flattened repo -> job_stage -> job_id aggregation counts."""
    body = {
        "query": {"terms": {"id": ids}},
        "aggs": AGGS,
        "size": 0
    }
    response = ES_CLIENT.search(index='jobs', body=body)
    return extract_aggs(response['aggregations'])

def call_diffoscope(first_id, second_id):
    """Ask the reproducible service to diff the rpms of two jobs.

    Returns the raw response body; on success it is JSON text with
    per-rpm statuses.
    """
    url = f"http://{REPRODUCIBLE_HOST}:{REPRODUCIBLE_PORT}/reproducible/diff_2rpms"
    payload = json.dumps({"first_id": first_id, "second_id": second_id})
    response = requests.post(url, payload)
    print(response)
    return response.text

def get_test_status(job, rpm):
    """Look up the per-rpm status in the diffoscope result.

    Anything that is not a non-empty dict (raw error text, None)
    counts as a failed build, as does a missing rpm key.
    """
    if not isinstance(job, dict) or not job:
        return "failing to build"
    return job.get(rpm, "failing to build")

def get_build_log(job):
    """Return (buildlog path, size in bytes) for a job.

    Prefers the per-suite log under result_root, falling back to
    'stderr'. Paths are returned without the '/srv' prefix used for
    the on-disk lookup; size is 0 when no log file exists.
    """
    result_root = job.get('result_root')
    log_path = result_root + '/' + job.get('suite')
    if not exists("/srv" + log_path):
        log_path = result_root + '/' + 'stderr'

    size = os.stat("/srv" + log_path).st_size if exists("/srv" + log_path) else 0
    return log_path, size

def get_rpm_builinfo(rpm, job1, job2):
    """Collect buildinfo path and sha256 hash key for both builds of rpm."""
    def _one_build(job):
        # path stored without the '/srv' prefix; hashing reads on disk
        path = job.get('result_root') + "/" + rpm + '-buildinfo'
        return {
            "hashkey": get_hashkey("/srv" + path),
            "buildinfo": path
        }

    return {
        "first": _one_build(job1),
        "second": _one_build(job2)
    }

def get_hashkey(buildinfo):
    """Extract the Checksums-Sha256 value(s) from a buildinfo file.

    Replaces the old `os.popen("cat … | grep … | awk …")` shell
    pipeline with a plain file read: no subprocess, and no shell
    injection risk from the path. Behavior matches the pipeline:
    the second whitespace-separated field of every line containing
    'Checksums-Sha256', newline-joined and stripped.
    Returns "" when the file or the field is missing.
    """
    if not exists(buildinfo):
        return ""

    keys = []
    with open(buildinfo, 'r', encoding='utf-8', errors='replace') as f:
        for line in f:
            if 'Checksums-Sha256' in line:
                fields = line.split()
                # awk printed an empty $2 for short lines; emulate that
                keys.append(fields[1] if len(fields) > 1 else "")
    return "\n".join(keys).strip()

def get_version(rpm):
    """Extract the version string from an rpm file name.

    Handles both shapes of name-version-release rpm file names:
      name-version-release.vendor.arch.rpm  e.g. grpc-1.31.0-6.oe1.x86_64.rpm
      name-version-release.arch.rpm         e.g. grpc-1.31.0-6.x86_64.rpm

    Bug fix: the original called m.group() on the second match
    unconditionally, raising AttributeError whenever that pattern did
    not match; now the second result only overwrites the first when
    the pattern actually matches, and "" is returned when neither does.
    """
    name = get_rpm_name(rpm)
    version = ""
    rest = rpm.replace(name, "", 1)

    # name-version-release.vendor.arch.rpm
    # eg: grpc-1.31.0-6.oe1.x86_64.rpm
    m = re.match(r"(.+)-(.+)\.(.+)?\.(.+)\.rpm", rest)
    if m:
        version = m.group(1) + "-" + m.group(2) + "-" + m.group(3)

    # name-version-release.arch.rpm
    # eg: grpc-1.31.0-6.x86_64.rpm
    # (this pattern also matches the vendor form, so when it matches
    # it intentionally overwrites the result above, as before)
    m = re.match(r"(.+)-(.+)\.(.+)\.rpm", rest)
    if m:
        version = m.group(1).replace("-", "") + "-" + m.group(2)

    return version

def get_diffoscope_logs(rpm, job):
    """List result_root-relative paths of diffoscope logs for an rpm.

    Bug fix: the original os.chdir()'d into the log directory, which
    permanently changed the process-wide working directory as a side
    effect; listing the directory by absolute path avoids that.
    Returns [] when the diffoscope directory does not exist.
    """
    logs = []
    log_dir = "/srv" + job.get('result_root') + "/diffoscope/"
    if exists(log_dir):
        for f in os.listdir(log_dir):
            if f.startswith(rpm):
                logs.append(job.get('result_root') + '/diffoscope/' + f)

    return logs

def assgin_repo_info(repo_info, rpm_list2, job1, job2, status):
    """Fill repo_info in place with per-rpm results and the verdict.

    The repo is "reproducible" only when every rpm of the second
    build is; an empty rpm list means the build itself failed.
    """
    if not rpm_list2:
        repo_info["test_status"] = "failing to build"
        repo_info["test_result"] = repo_info["test_status"]
        return

    repo_info.setdefault("build_infos", {})
    repo_info.setdefault("rpms", {})
    repo_info["test_status"] = "reproducible"

    for rpm in rpm_list2:
        rpm_status = get_test_status(status, rpm)
        rpm_name = get_rpm_name(rpm)

        # the main package of the repo carries the repo-level version
        if rpm_name == repo_info.get("pkg_name"):
            repo_info["version"] = get_version(rpm)

        # one unreproducible rpm makes the whole repo unreproducible
        if rpm_status != "reproducible":
            repo_info["test_status"] = "unreproducible"

        repo_info["rpms"][rpm] = {
            "test_status": rpm_status,
            "category_level": CATEGORY.get(rpm_name, "other"),
            "diffoscope_logs": get_diffoscope_logs(rpm, job2),
            "build_infos": get_rpm_builinfo(rpm, job1, job2),
        }

    repo_info["test_result"] = repo_info["test_status"]

def count_reproducible_result(job_id1, job_id2, status):
    """Assemble and index the reproducibility report for one job pair.

    Fetches both job documents from the lab ES 'jobs' index, builds one
    repo-level document (indexed into 'repo-reproducible-test' on the
    cloud ES), then one document per rpm produced by the second build
    (indexed into 'reproducible-test' on both clusters).

    :param job_id1: ES id of the first build job
    :param job_id2: ES id of the second build job
    :param status: per-rpm diff result dict from call_diffoscope(),
                   or the raw response text when it was not JSON
    """
    body_job1 = {
      'query':{
        'match':{
          '_id':job_id1
        }
      }
    }

    body_job2 = {
      'query':{
        'match':{
          '_id':job_id2
        }
      }
    }

    res_job1 = ES_CLIENT.search(
      index = 'jobs',
      body = body_job1
      )
    res_job2 = ES_CLIENT.search(
      index = 'jobs',
      body = body_job2
      )
    # assumes both ids exist in the index — raises IndexError otherwise
    job1 = res_job1['hits']['hits'][0]['_source']
    job2 = res_job2['hits']['hits'][0]['_source']

    output_dict = defaultdict(dict)
    total_dict = defaultdict(list)  # NOTE(review): never used below

    # source package info (mostly taken from the first job's document)
    output_dict["arch"] = ARCH.get(job1.get('arch'))
    output_dict["submit_time"] = job1.get('submit_time')
    pkg_name = job1.get('upstream_repo').split('/')[-1]
    output_dict["pkg_name"] = pkg_name
    output_dict["pkg_path"] = job1.get('upstream_url')

    # ternary binds over the whole concatenation: "level-<n>" or 'other'
    level = job1.get("category_level", "other")
    output_dict["category_level"] = "level-" + str(level) if level != 'other' else 'other'

    output_dict["upstream_repo"] = job1.get("upstream_repo")
    output_dict["upstream_branch"] = job1.get("upstream_branch")
    output_dict["group_id"] = job1.get("group_id")
    output_dict["task_start_time"] = job2.get('start_time')
    output_dict["task_end_time"] = job2.get('end_time')
    output_dict["commit_id"] = job2.get("upstream_commit")

    # buildlogs
    buildlog_1, buildlog_1_size = get_build_log(job1)
    buildlog_2, buildlog_2_size = get_build_log(job2)
    output_dict["build_logs"] = {
            "first": {
		"buildlog": buildlog_1,
		"size": buildlog_1_size
		},
            "second": {
		"buildlog": buildlog_2,
		"size": buildlog_2_size
		}
            }

    # rpm artifacts of each build, read from the result trees under /srv
    # NOTE(review): rpm_list1 is computed but never used below
    rpm_list1 = rpm_judgements('/srv' + job1.get('result_root'))
    rpm_list2 = rpm_judgements('/srv' + job2.get('result_root'))

    # repo info storge
    # repo info come from compass-ci ES/jobs
    # NOTE(review): repo_info is an alias of output_dict, not a copy —
    # the per-rpm loop below reuses and overwrites the same dict after
    # the repo-level document has been indexed.
    repo_info = output_dict
    repo_info["id"] = pkg_name + '-' + job_id1 + '-' + job_id2
    # current commit version(git log tag)
    # repo_info["version"] = 
    assgin_repo_info(repo_info, rpm_list2, job1, job2, status)
    CLOUD_ES_CLIENT.index(
        index = 'repo-reproducible-test',
        doc_type = '_doc',
        id = repo_info["id"],
        body = json.dumps(repo_info, indent=4))

    # rpms info and storge
    for rpm in rpm_list2:
        rpm_name = get_rpm_name(rpm)
        output_dict["rpm_name"] = rpm
        output_dict["id"] = rpm_name + '-' + job_id1 + '-' + job_id2
        output_dict["test_status"] = get_test_status(status, rpm)
        output_dict["test_result"] = output_dict["test_status"]
        output_dict["build_infos"] = get_rpm_builinfo(rpm, job1, job2)
        output_dict["rpm_version"] = get_version(rpm)
        output_dict["diffoscope_logs"] = get_diffoscope_logs(rpm, job2)

        ES_CLIENT.index(
            index = 'reproducible-test',
            doc_type = '_doc',
            id = output_dict["id"],
            body = json.dumps(output_dict, indent=4))

        CLOUD_ES_CLIENT.index(
            index = 'reproducible-test',
            doc_type = '_doc',
            id = output_dict["id"],
            body = json.dumps(output_dict, indent=4))

def rpm_judgements(path):
    """Return the *.rpm file names found directly under path.

    Bug fix: the original os.chdir()'d into path, a lasting
    process-wide side effect; listing the directory by absolute path
    avoids that. Returns [] when path does not exist.
    """
    if not os.path.exists(path):
        return []
    return [name for name in os.listdir(path) if str(name).endswith('.rpm')]

def wait(ids):
    """Poll the jobs index until every submitted job pair finishes.

    For each repo whose two jobs reached job_stage 'finish', triggers
    the diffoscope comparison (STEP-3) and stores the results
    (STEP-4), then drops those ids from the watch list. Polls once a
    minute and gives up after roughly 48 hours.
    """
    max_wait_time = 3600 * 48
    while len(ids) > 0 and max_wait_time > 0:
        print(f"{len(ids)} jobs are running")
        finish_ids = []
        monitor_data = get_monitor_data(ids)
        if len(ids) < 10:
            print(ids)
            print(monitor_data)

        for repo, repo_result in monitor_data.items():
            target = repo_result.get('finish')
            # both jobs of the pair must be finished before comparing
            if not target or len(target) < 2:
                continue

            current_ids = sorted(list(target.keys()))
            print(f"we are now handle: {repo}")
            print(current_ids)
            # STEP-3: call api: /web_backend/diff_2rpms
            status = call_diffoscope(current_ids[0], current_ids[1])
            try:
                status = json.loads(status)
            except (ValueError, TypeError):
                # Bug fix: was a bare "except:". Keep the deliberate
                # best-effort fallback (pass the raw text through) but
                # only for actual decode failures, so real errors like
                # KeyboardInterrupt are no longer swallowed.
                pass

            time.sleep(1)

            # STEP-4: extract test info from job1,job2 of each repo reproducible-test
            count_reproducible_result(current_ids[0], current_ids[1], status)
            finish_ids += current_ids

        ids = list(set(ids).difference(set(finish_ids)))
        max_wait_time -= 60
        print('---------sleep 60s for next checking----------')
        print(f"---------now is: {datetime.datetime.now()}----------")
        time.sleep(60)
        
