from flask import Flask,render_template,request,redirect
app = Flask(__name__)
# Home page: serves the login form.
@app.route("/")
def index():
    """Render the landing/login page."""
    return render_template("index.html")

# Handle the login form submission.
@app.route("/login", methods=['POST', 'GET'])
def login():
    """Validate the submitted credentials.

    Redirects to the crawler form ("/scrawl") on success, and back to
    the login page ("/") on wrong credentials or missing form fields.
    """
    # .get() returns None for missing keys instead of aborting with a
    # 400 error -- the route accepts GET, where there is no form data,
    # so the original bracket lookup crashed on plain GET requests.
    username = request.form.get('username')  # submitted user name
    userpwd = request.form.get('userpwd')    # submitted password
    # SECURITY NOTE(review): hard-coded plaintext credentials; replace
    # with a user store and hashed-password check before production use.
    if username == 'admin' and userpwd == 'admin123':
        # Absolute path, consistent with the other redirects in this
        # file (the original relative "scrawl" only worked by accident
        # of being resolved against "/login").
        return redirect("/scrawl")
    return redirect("/")  # back to the login page on any failure

# Crawler job: the web-side form page.
@app.route("/scrawl")
def scrawler():
    """Render the crawler configuration form."""
    return render_template("scrawl.html")

@app.route("/start", methods=['POST','GET'])
def start():
    """Read the crawl target from the submitted form, scrape every <p>
    element from the target page, and return the result as a string."""
    form = request.form
    # Job label; the bracket lookup keeps the original required-field
    # check (missing key -> 400), though the value is not used further.
    target_name = form['targetname']
    target_url = form['targeturl']
    # Invoke the crawler on the submitted URL, scraping paragraph text.
    return str(crawl(target_url, selector="p"))

import requests
from bs4 import BeautifulSoup
def crawl(url, selector, timeout=10):
    """Fetch *url* and return the text of every element matching *selector*.

    Parameters:
        url: the page to download.
        selector: CSS selector passed to ``BeautifulSoup.select``.
        timeout: seconds before the HTTP request is aborted. New,
            backward-compatible parameter -- the original call had no
            timeout, so an unresponsive host could hang the Flask
            worker indefinitely.

    Returns:
        list[str]: the ``.text`` of each matched element (may be empty).
    """
    # Present a desktop-browser User-Agent; some sites refuse the
    # default python-requests UA.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
    }
    # Download the page source.
    htmlcodes = requests.get(url, headers=headers, timeout=timeout).text
    # Parse with the stdlib html.parser backend (no extra dependency).
    bs = BeautifulSoup(htmlcodes, 'html.parser')
    # Collect the text of every node matching the CSS selector.
    return [item.text for item in bs.select(selector=selector)]

if __name__=="__main__":
    # Start Flask's built-in development server on the default
    # host/port (127.0.0.1:5000); not suitable for production serving.
    app.run()

