#!/usr/bin/env python3

"""
Module for writing NMR STAR files from SpecDB schema
"""

import os
import sqlite3
import sys

import pynmrstar
from pynmrstar import utils, Loop


def flatten_query(sql, col, table, cursor):
	"""Execute *sql* and flatten the rows into {(table, column): [values]}.

	Columns named 'id' and the column named by *col* are skipped.
	Requires the cursor's connection to use sqlite3.Row as row_factory
	(rows must support .keys()).
	"""
	cursor.execute(sql)
	flattened = dict()
	for row in cursor.fetchall():
		for column in row.keys():
			# drop the surrogate primary key and the caller-excluded column
			if column in ('id', col):
				continue
			flattened.setdefault((table, column), list()).append(row[column])
	return flattened


def query_dbview(view, curr_view, table, fkeys, cursor):
	"""Merge *curr_view* into *view*, recursively following foreign keys.

	For every (table, column) entry of *curr_view* that has a foreign-key
	mapping in *fkeys* (fkeys[table][column] = {'parent': ..., 'ptable': ...}),
	the referenced parent row is fetched, flattened, merged into *view*
	(without overwriting existing keys), and followed recursively. Each
	consumed mapping is deleted from *fkeys* so a parent is visited at
	most once. Returns the (mutated) *view* dict.
	"""
	for key, values in curr_view.items():
		# every column of the current view lands in the merged view
		view[key] = values
		column = key[1]
		if table not in fkeys or column not in fkeys[table]:
			continue
		link = fkeys[table][column]
		# foreign-key columns are expected to carry exactly one value
		assert(type(values) == list and len(values) == 1)
		sql = (f"select * from {link['ptable']} where "
			f"{link['parent']} = '{values[0]}' order by id ASC")
		parent_flat = flatten_query(sql, '', link['ptable'], cursor)
		for pkey, pvalues in parent_flat.items():
			if pkey not in view:
				view[pkey] = pvalues
		view = query_dbview(view, parent_flat, link['ptable'], fkeys, cursor)
		# mark this relationship as consumed
		del fkeys[table][column]
	return view


def star_loop_constructor(tags, view, cat):
	"""Build the data dict for a STAR loop from category *cat* of *view*.

	Parameters
	----------
	+ tags	loop tag names to extract (in loop order)
	+ view	{category: {tag: [values]}} mapping (e.g. the specdb dict)
	+ cat	saveframe category to pull values from

	Returns
	-------
	{tag: [values]} restricted to *tags*, with every column padded to the
	longest column's length using '.' placeholders, and with '<'/'>'
	characters (illegal in STAR values) stripped from string values.
	Returns {} when *cat* is not present in *view*.

	Note: the value lists are shared with (and padded/cleaned in) *view*.
	"""
	if cat not in view:
		return {}

	columns = view[cat]

	# longest column determines the loop's row count (0 for an empty category)
	maxlen = max((len(vals) for vals in columns.values()), default=0)

	# BUGFIX: the pad count was computed as len - maxlen (negative), so
	# ['.'] * negative produced [] and short columns were never padded
	for vals in columns.values():
		shortfall = maxlen - len(vals)
		if shortfall > 0:
			vals += ['.'] * shortfall

	loop_data = {t: columns[t] for t in tags if t in columns}

	# scrub angle brackets, which are not legal inside STAR values
	for vals in loop_data.values():
		for i, val in enumerate(vals):
			if isinstance(val, str):
				vals[i] = val.replace('<', '').replace('>', '')

	return loop_data


def fid2star(data=None, cursor=None, save=None):
	"""
	Take FID information from SpecDB into STAR

	Builds a flattened view of the selected time_domain_dataset row by
	following foreign keys up through parent tables, translates SpecDB
	columns to NMR-STAR tags via the star_conversion table, fills the
	bundled STAR template, and writes the result next to the raw data.

	Parameters
	----------
	+ data		SQLite row object from SpecDB summary query
	+ cursor	SQLite cursor object to perform queries
	+ save		path to save resulting STAR file

	Returns
	-------
	True		if STAR creation successful

	Raises
	------
	FileNotFoundError	if neither 'fid' nor 'ser' exists beside *save*
	"""

	# collect every foreign-key relationship in the database:
	# all_fkeys[child_table][child_column] = {'parent': col, 'ptable': table}
	sql =  "SELECT m.name, p.* FROM sqlite_master m JOIN"
	sql += " pragma_foreign_key_list(m.name) p ON m.name != p.'table' WHERE"
	sql += " m.type = 'table' ORDER BY m.name"
	cursor.execute(sql)
	all_fks = cursor.fetchall()

	all_fkeys = dict()
	for fk in all_fks:
		# row layout: 0=child table, 3=parent table, 4=child col, 5=parent col
		child  = fk[4]
		parent = fk[5]
		ptable = fk[3]
		ctable = fk[0]

		if ctable not in all_fkeys:
			all_fkeys[ctable] = dict()

		if child not in all_fkeys[ctable]:
			all_fkeys[ctable][child] = {'parent':parent, 'ptable':ptable}

	# gather conversion table: (table, column) -> saveframe -> [STAR tags]
	cursor.execute("SELECT * from star_conversion")
	mappings = cursor.fetchall()

	translator = dict()
	for k in mappings:
		table = k[2]
		col   = k[1]
		tag   = k[3]
		frame = k[4]

		if table not in translator: translator[table] = dict()
		if col not in translator[table]: translator[table][col] = dict()
		if frame not in translator[table][col]:
			translator[table][col][frame] = []
		translator[table][col][frame].append(tag)

	# flatten the dataset row and everything reachable through its foreign
	# keys into one {(table, column): [values]} view
	# NOTE(review): values are interpolated into SQL strings; they come from
	# the local database itself, but parameterized queries would be safer
	sql = f"SELECT * FROM time_domain_dataset where id == '{data['id']}'"

	view = dict()
	flat = flatten_query(sql, '', 'time_domain_dataset', cursor)
	dbview = query_dbview(view, flat, 'time_domain_dataset', all_fkeys, cursor)

	sql = f"SELECT * FROM batch_components WHERE pst_id == '{dbview[('time_domain_dataset','pst_id')][0]}'"
	flat = flatten_query(sql, '', 'batch_components', cursor)
	moreview = query_dbview(dbview, flat, 'batch_components', all_fkeys, cursor)

	sql = f"SELECT * from buffer_components WHERE buffer_id == '{dbview[('pst','buffer_id')][0]}'"
	flat = flatten_query(sql, 'buffer_id', 'buffer_components', cursor)
	fullview = query_dbview(moreview, flat, 'buffer_components', all_fkeys, cursor)

	# load the STAR template shipped alongside this package
	specdb_path = os.path.abspath(os.path.dirname(__file__))
	star_path = os.path.join(specdb_path, '../sql/template.str')

	with open(star_path, 'r') as fp:
		entry = pynmrstar.Entry.from_file(fp)

	entry.entry_id = 'SpecDBQuery'
	# Obtain schema version
	schema = utils.get_schema()
	fullview['nmr_star_version'] = schema.version

	# Fill in timedomain specific tags not given in schema
	fullview['title'] = fullview[('project', 'project_id')][0]
	fullview['data_file_content_type'] = 'timedomain_data'

	# the raw time-domain file ('fid' for 1D, 'ser' for nD) must sit in the
	# directory the STAR file is being written into
	save_split = save.split('/')
	save_path = '/'.join(save_split[:-1])
	if 'fid' in os.listdir(save_path):
		tdname = 'fid'
	elif 'ser' in os.listdir(save_path):
		tdname = 'ser'
	else:
		# previously this fell through and crashed with a NameError below
		raise FileNotFoundError(
			f"no 'fid' or 'ser' time-domain file found in {save_path}")

	file_dir_path = '/'.join(save_split[-3:-1])+'/'+tdname

	# translate the flattened view into per-saveframe {tag: [values]} dicts
	specdb = dict()
	for k in fullview.keys():
		if k[0] in translator:
			if k[1] in translator[k[0]]:
				for frame in translator[k[0]][k[1]]:
					if frame not in specdb: specdb[frame] = dict()
					for tag in translator[k[0]][k[1]][frame]:
						if tag not in specdb[frame]: specdb[frame][tag] = list()
						specdb[frame][tag].extend(fullview[k])

	specdb['nmr_star_version'] = schema.version
	specdb['title'] = fullview[('project', 'project_id')][0]
	# the experiment_list frame may have no mapped columns, so create it
	# instead of assuming the translation loop above did (KeyError before)
	exp_list = specdb.setdefault('experiment_list', dict())
	exp_list['_Experiment_file.Name'] = [tdname]
	exp_list['_Experiment_file.Type'] = ['free-induction decay']
	exp_list['_Experiment_file.Directory_path'] = [file_dir_path]

	# walk the template and fill in every tag and loop we have data for
	for frame in entry:
		cat = frame._category

		for ftags in frame.tag_dict.keys():

			tt = ftags.capitalize()
			full_tag = frame.tag_prefix+'.'+tt

			if ftags in specdb:
				# top-level entries ('title', 'nmr_star_version') are plain
				# strings; indexing them with [0] kept only the first char
				val = specdb[ftags]
				frame[ftags] = val if isinstance(val, str) else val[0]
			if cat in specdb:
				if full_tag in specdb[cat]:
					frame[ftags] = specdb[cat][full_tag][0]

		for loop in frame.loop_iterator():
			tags = loop.get_tag_names()

			if '_Upload_data.Data_file_name' in tags:
				loop.add_data({
					'_Upload_data.Data_file_name':[tdname],
					'_Upload_data.Data_file_content_type':['free-induction decay']
				})
				continue

			data_to_add = star_loop_constructor(tags, specdb, cat)
			if data_to_add:
				loop.add_data(data_to_add)

	entry.add_missing_tags()
	entry.write_to_file(save)

	# report success so the documented return value is actually produced
	return True