import tkinter
import requests  
import io  
import sys
import urllib
import newspaper
from newspaper import Article
from tkinter import StringVar
from tkinter import scrolledtext
from bs4 import BeautifulSoup
from bs4 import BeautifulSoup, Comment
from sklearn.feature_extraction.text import TfidfVectorizer


root = tkinter.Tk()
root.title("刘亚圣")
root.geometry("700x700")

# --- URL input row 1 ---
lbl_url1 = tkinter.Label(root, text="请输入网址1:", fg="black", font=("宋体", 10))
lbl_url1.grid(column=0, row=0)

e = StringVar()
entry1 = tkinter.Entry(root, textvariable=e)
entry1.grid(column=1, row=0)

# --- URL input row 2 ---
lbl_url2 = tkinter.Label(root, text="请输入网址2:", fg="black", font=("宋体", 10))
lbl_url2.grid(column=0, row=1)

b = StringVar()
entry2 = tkinter.Entry(root, textvariable=b)
entry2.grid(column=1, row=1)

# --- page 1: raw HTML pane ---
lbl_src1 = tkinter.Label(root, text="网址1源码:", fg="black", font=("宋体", 10))
lbl_src1.grid(column=0, row=2)

texta = scrolledtext.ScrolledText(root, width=35, height=15)
texta.place(x=30, y=90)

# --- page 1: extracted-text pane ---
lbl_txt1 = tkinter.Label(root, text="网页1文本提取:", fg="black", font=("宋体", 10))
lbl_txt1.place(x=10, y=300)

texta1 = scrolledtext.ScrolledText(root, width=35, height=15)
texta1.place(x=30, y=330)

# --- page 2: raw HTML pane ---
lbl_src2 = tkinter.Label(root, text="网址2源码:", fg="black", font=("宋体", 10))
lbl_src2.place(x=330, y=64)

textb = scrolledtext.ScrolledText(root, width=35, height=15)
textb.place(x=350, y=90)

# --- page 2: extracted-text pane ---
lbl_txt2 = tkinter.Label(root, text="网页2文本提取:", fg="black", font=("宋体", 10))
lbl_txt2.place(x=330, y=300)

textb1 = scrolledtext.ScrolledText(root, width=35, height=15)
textb1.place(x=350, y=330)




def jiexia():
    """Fetch the URL typed in entry 1 and fill the page-1 panes.

    Reads the module-level StringVar ``e``, downloads and parses the page
    with newspaper's ``Article``, then shows the raw HTML in ``texta`` and
    the extracted title + body text in ``texta1``.

    NOTE(review): ``Article`` is given ``e.get()`` verbatim — the user must
    type a full URL including the ``http(s)://`` scheme (an earlier version
    prepended ``"http://"``); confirm the intended input format.
    """
    url = e.get()
    news = Article(url, language='zh')
    news.download()   # fetch the page over the network
    news.parse()      # extract title/text from the downloaded HTML

    # Raw page source.
    texta.delete('1.0', 'end')
    texta.insert('1.0', news.html)

    # Extracted title, then the article body.  The original inserted the
    # body at index 2.0, which clamps to the end of the single title line
    # and ran title and text together with no separator.
    texta1.delete('1.0', 'end')
    texta1.insert('1.0', news.title + '\n')
    texta1.insert('end', news.text)

    
    
def jiexib():
    """Fetch the URL typed in entry 2 and fill the page-2 panes.

    Reads the module-level StringVar ``b``, downloads and parses the page
    with newspaper's ``Article``, then shows the raw HTML in ``textb`` and
    the extracted title + body text in ``textb1``.

    NOTE(review): as with jiexia, the user must type a full URL including
    the ``http(s)://`` scheme — confirm the intended input format.
    """
    url = b.get()
    news = Article(url, language='zh')
    news.download()   # fetch the page over the network
    news.parse()      # extract title/text from the downloaded HTML

    # Raw page source.
    textb.delete('1.0', 'end')
    textb.insert('1.0', news.html)

    # Extracted title, then the article body, separated by a newline
    # (index 2.0 clamped to the end of the title line and ran them together).
    textb1.delete('1.0', 'end')
    textb1.insert('1.0', news.title + '\n')
    textb1.insert('end', news.text)



# Action buttons.  Each parses one of the two URLs entered above.
w = tkinter.Button(text="解析网址1", command=jiexia)
w.grid(column=2, row=0)

q = tkinter.Button(text="解析网址2", command=jiexib)
q.grid(column=2, row=1)

# NOTE(review): this button is wired to jiexib, so clicking it only re-parses
# URL 2 — no TF-IDF is ever computed.  The commented-out TfidfVectorizer
# sketch inside jiexia suggests a real handler was planned; wire it here once
# implemented.  (The original also reassigned ``q`` here, shadowing the
# "解析网址2" button's variable — renamed to keep both references.)
tfidf_btn = tkinter.Button(text="计算TFIDF", command=jiexib)
tfidf_btn.grid(column=3, row=1)

