import json
import re

# Convert SciERC (and related scientific-NLP datasets) into a text-generation / instruction format.
def get_sequence(word_list):
    """Join a token list into a sentence string.

    Tokens are separated by single spaces, except that tokens beginning
    with punctuation (, . : ; ! ? %) attach directly to the previous token.
    Leading/trailing whitespace is stripped from the result.
    """
    pieces = []
    for token in word_list:
        if token.startswith((',', '.', ':', ';', '!', '?', '%')):
            pieces.append(token)
        else:
            pieces.append(' ' + token)
    return ''.join(pieces).strip()

def read_dataset(data_name, set_name):
    """Load one split of a dataset and normalize it for text generation.

    Args:
        data_name: dataset identifier — 'scierc', 'scicite', 'scinli'
            or 'pwc_kw'.
        set_name: split name, e.g. 'train', 'dev', 'test'.

    Returns:
        A list of dicts, each with at least a 'text' field ('label' where
        the origin data provides one). An unknown data_name returns an
        empty list (original behavior preserved).
    """
    if data_name == "scierc":
        return _read_scierc(set_name)
    if data_name == "scicite":
        return _read_scicite(set_name)
    if data_name == "scinli":
        return _read_scinli(set_name)
    if data_name == "pwc_kw":
        return _read_pwc_kw(set_name)
    return []


def _read_scierc(set_name):
    """Read SciERC (one JSON object per line): sentence text + NER spans."""
    dataset = []
    with open(f"data/origin_data/scierc/{set_name}.json", 'r', encoding='utf-8') as f:
        for raw in f:
            record = json.loads(raw)
            # ner entries are [start, end, type]; end index is inclusive.
            entities = [
                [get_sequence(record['sentence'][span[0]:span[1] + 1]), span[2]]
                for span in record['ner']
            ]
            dataset.append({
                'text': get_sequence(record['sentence']),
                'label': entities,
            })
    return dataset


def _read_scicite(set_name):
    """Read SciCite: JSON-lines stored in a .txt file, used as-is."""
    with open(f"data/origin_data/scicite/{set_name}.txt", 'r', encoding='utf-8') as f:
        return [json.loads(raw) for raw in f]


def _read_scinli(set_name):
    """Read SciNLI JSONL and add a combined two-sentence 'text' field."""
    dataset = []
    with open(f"data/origin_data/scinli/{set_name}.jsonl", 'r', encoding='utf-8') as f:
        for raw in f:
            record = json.loads(raw)
            record['text'] = f"#1: {record['sentence1']}\n#2: {record['sentence2']}"
            dataset.append(record)
    return dataset


def _read_pwc_kw(set_name):
    """Read pwc_kw: recover title and keyword category from the instruction text.

    Raises:
        ValueError: if an instruction does not contain a double-quoted title
            or a 'the keywords of <X> type' phrase.
    """
    with open(f"data/origin_data/pwc_kw/{set_name}.json", 'r', encoding='utf-8') as f:
        origin_dataset = json.load(f)
    dataset = []
    for data in origin_dataset:
        # The paper title is the first double-quoted span of the instruction.
        title_match = re.search(r'"([^"]+)"', data['instruction'])
        # The keyword category is phrased as 'the keywords of <X> type'.
        category_match = re.search(r'the keywords of (\w+) type', data['instruction'])
        if title_match is None or category_match is None:
            # Fail loudly with context instead of an opaque AttributeError.
            raise ValueError(f"unparsable pwc_kw instruction: {data['instruction']!r}")
        dataset.append({
            "text": f"title: {title_match.group(1)}\nabstract: {data['input']}\ncategory: {category_match.group(1)}",
            "label": data['output'],
        })
    return dataset

def get_instruction(name=None):
    """Return the task instruction prompt for a dataset.

    Args:
        name: dataset identifier ('scierc', 'scicite', 'pwc_kw', 'scinli').
            Defaults to the module-level ``data_name`` global, preserving
            the original no-argument call style.

    Returns:
        The instruction string that is prepended to each example's text.

    Raises:
        KeyError: if the dataset is unknown (the original code raised
            UnboundLocalError in that case).
    """
    # Only touch the global when no explicit name was given.
    key = data_name if name is None else name
    instructions = {
        # Fixed: the original list contained 'Generic' twice.
        'scierc': 'Extract scientific entities from sentences. The scientific entity category includes [\'Method\', \'Task\', \'Metric\', \'Material\', \'Generic\', \'OtherScientificTerm\'].\nSentence:\n',
        'scicite': 'Identify the intent of a citation in scientific papers. Choose the citation intention of the following sentence from [\'method\', \'background\', \'result\'].\nSentence:\n',
        'pwc_kw': 'Generate keywords for specific categories based on title and abstract of paper. \n',
        'scinli': 'Identify the semantic relationship between the following pair of sentences. Choose one between "contrasting", "reasoning", "entailment" or "neutral".\nSentence:\n',
    }
    return instructions[key]



if __name__ == '__main__':
    from pathlib import Path

    data_name = 'scinli'
    set_name = 'train'

    # Bug fix: the original called read_dataset() with no arguments,
    # which raises TypeError since the function takes two positionals.
    data = read_dataset(data_name, set_name)
    instruction = get_instruction()

    res = []
    for idx, d in enumerate(data, start=1):
        res.append({
            "conversation_id": idx,
            # NOTE(review): "nature" looks like a typo for "natural", but the
            # label is kept as-is since downstream data may depend on it.
            "category": "nature language inference",
            "conversation": [
                {
                    "human": instruction + d['text'],
                    "assistant": str(d['label']),
                }
            ],
        })

    # Ensure the output directory exists before writing (original code
    # crashed with FileNotFoundError when it was missing).
    out_path = Path(f'data/instruct_data/{data_name}/{set_name}.jsonl')
    out_path.parent.mkdir(parents=True, exist_ok=True)
    with open(out_path, "w", encoding='utf8') as f:
        for r in res:
            f.write(json.dumps(r, ensure_ascii=False) + '\n')
