# Crawler module: scrapes content from the target website (纵横中文网)
import requests
from lxml import etree
link='http://www.zongheng.com/' # Zongheng Chinese web-novel site (纵横中文小说网)

# Proxy settings for running inside an intranet — 'proxy.x.x' is a placeholder.
proxies={
'http':'proxy.x.x',
'https':'proxy.x.x'
}
# NOTE(review): verify=False disables TLS certificate checking — acceptable only
# behind a trusted internal proxy; do not use against untrusted networks.
r = requests.get(link,verify=False,proxies=proxies) # fetch the Zongheng homepage
#print(type(r.text))
r.encoding='utf-8'  # force UTF-8 so Chinese text decodes correctly
html=r.text
#print(html)
# Split on CRLF — assumes the server emits Windows-style line endings; TODO confirm.
lines=html.split('\r\n')

print('\n'.join(lines))

#将爬取的网页内容写入数据库
# Persist the scraped page, one document per line, into a TinyDB JSON store.
from tinydb import TinyDB, Query

db_file='db1.json'
db = TinyDB(db_file)

# insert_multiple accepts a generator — each line becomes {'content': line}.
db.insert_multiple({'content': line} for line in lines)
db.close()

# Demo query: substring-search the stored lines for a sample keyword.
db = TinyDB(db_file)
User = Query()

def test_contains(value, search):
    """Return True when *search* occurs as a substring of *value*."""
    return search in value

keyword='鬼'
print(db.search(User['content'].test(test_contains, keyword)))
db.close()

from fastapi import FastAPI
import uvicorn
from starlette.responses import HTMLResponse

app = FastAPI()  # ASGI application serving the search front-end


@app.get("/")
async def root(q: str = None):
    """Serve a search form; when query parameter ``q`` is present,
    append every stored line whose ``content`` contains ``q``.

    Returns an ``HTMLResponse`` either way, so the form is always shown.
    """
    content='''

<body>
<h4>百度一下</h4>
<form name="input" action="" method="get">
<input type="text" name="q">
<input type="submit" value="搜索">
</form>

</body>

    '''
    if q:
        db = TinyDB(db_file)
        User = Query()

        def contains(value, search):
            # Substring predicate passed to TinyDB's Query.test.
            return search in value

        # Query once and reuse — the original ran the identical search twice.
        results = db.search(User['content'].test(contains, q))
        print(results)
        db.close()
        # str.join avoids quadratic += concatenation over the result set.
        body = ''.join(result['content'] for result in results)
        return HTMLResponse(content=content + body)
    else:
        return HTMLResponse(content=content)
if __name__ == "__main__":
    # Run the ASGI app directly with uvicorn; binds all interfaces on port 8080.
    uvicorn.run(app, host="0.0.0.0",port=8080,log_level="info")
