"""Split a text file into clause-like chunks using spaCy.

Reads an input text file, splits it into clauses at punctuation
boundaries, and writes one clause per line to an output file.
"""
import argparse

import spacy

# Load the small English pipeline once at module import; reused by split_text().
# (Removed leftover debug print() statements that polluted stdout.)
nlp = spacy.load("en_core_web_sm")

def split_text(text):
    """Break *text* into clause-like chunks.

    spaCy segments the text into sentences; within each sentence every
    punctuation token closes the current chunk. Tokens are re-joined with
    single spaces, so the original spacing is not preserved.

    Returns a list of clause strings.
    """
    clauses = []
    for sentence in nlp(text).sents:
        buffer = []
        for tok in sentence:
            buffer.append(tok.text)
            # A punctuation token ends the current clause; flush the buffer.
            if tok.is_punct:
                clauses.append(" ".join(buffer))
                buffer = []
        # Any trailing tokens after the last punctuation form a final clause.
        if buffer:
            clauses.append(" ".join(buffer))
    return clauses

# Hard-coded I/O paths for local testing; re-enable the argparse block below
# to drive the script from the command line instead.
# parser = argparse.ArgumentParser(description='Split text into clauses')
# parser.add_argument('input_file',  metavar='input_file', type=str, nargs='?')
# parser.add_argument('output_file', metavar='output_file', type=str, nargs='?' )
# args = parser.parse_args()
#
# BUG FIX: the original assigned a plain dict here but then read
# args.input_file / args.output_file via attribute access, which raises
# AttributeError. argparse.Namespace supports attribute access and is
# drop-in compatible with the commented-out parse_args() path above.
args = argparse.Namespace(
    input_file="./toy_article.txt",
    output_file="./toy_out.txt",
)

# Read the input text from a file, or from standard input when no path is set.
if args.input_file:
    with open(args.input_file, "r") as f:
        text = f.read()
else:
    text = input()

# Split the text into clauses.
splited_text = split_text(text)

# Write one clause per line to the output file, or to stdout when no path is set.
if args.output_file:
    with open(args.output_file, "w") as f:
        for line in splited_text:
            f.write(line + "\n")
else:
    for line in splited_text:
        print(line)


