from lxml.html.clean import Cleaner
import re
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
# import requests
# Stop-word and stemmer setup shared by the processing loop below.
en_stop = stopwords.words('english')
it_stop = stopwords.words('italian')
# BUG FIX: the original did stop.append(en_stop) / stop.append(it_stop),
# which builds a list containing two *lists*. The later membership test
# `w not in stop` then compared each token against whole lists, so it never
# matched a real word and no stop words were ever filtered. A flat set of
# both vocabularies gives the intended behavior plus O(1) lookups.
stop = set(en_stop) | set(it_stop)
pt = PorterStemmer()  # Porter stemmer applied to every surviving token

def _clean_and_stem(raw_text):
    """Tokenize *raw_text*, drop stop words, Porter-stem the remainder.

    Returns the stemmed tokens re-joined into a single space-separated
    string. Relies on the module-level `stop` collection and `pt` stemmer.
    """
    tokens = word_tokenize(raw_text.strip())
    filtered = [w for w in tokens if w not in stop]
    return ' '.join(pt.stem(w) for w in filtered)


# Stream the bilingual comparable corpus, strip XML tags from each line, and
# emit one <DOCNO>/<TEXT> pair per article: English articles go to a.txt,
# Italian articles to b.txt.
with open('a.txt', 'w', encoding='utf-8') as ar, \
     open('b.txt', 'w', encoding='utf-8') as br, \
     open('wikicomp-2014_enit.xml', 'r', encoding='utf-8', errors='ignore') as p:
    tag_re = re.compile('<[^>]*>')  # removes any <...> markup from a line
    article1 = ''    # accumulated text of the current English article
    article2 = ''    # accumulated text of the current Italian article
    lang = 0         # 0 = outside any article, 1 = English, 2 = Italian
    pair_id = None   # id of the current <articlePair> (was `id`: shadowed builtin)
    # Iterate the file lazily instead of readlines() — the corpus is large
    # and nothing below needs random access.
    for line in p:
        if '<articlePair' in line:
            pair_id = re.findall(r'\d+', line)[0]
            print(pair_id)
        if '<article lang="en"' in line:
            article1 = ''
            lang = 1
        if '<article lang="it"' in line:
            article2 = ''
            lang = 2
        content = tag_re.sub('', line.strip('\n')).strip()
        # Only accumulate while inside an article. BUG FIX: the original
        # `else` branch also caught lang == 0, dumping pre-article and
        # inter-article junk into article2.
        if lang == 1:
            article1 += content
        elif lang == 2:
            article2 += content
        if '</article>' in line:
            # BUG FIX: the original called .strip('/n') — stripping the
            # literal characters '/' and 'n' — instead of .strip('\n');
            # _clean_and_stem's .strip() now removes all edge whitespace.
            if lang == 1:
                ar.write('<DOCNO>' + str(pair_id) + '</DOCNO>\n')
                ar.write('<TEXT>' + _clean_and_stem(article1) + '</TEXT>\n')
            elif lang == 2:
                br.write('<DOCNO>' + str(pair_id) + '</DOCNO>\n')
                br.write('<TEXT>' + _clean_and_stem(article2) + '</TEXT>\n')
            lang = 0  # back outside any article until the next <article ...>
