from fastapi import FastAPI
from starlette.requests import Request
import os
from fastapi.templating import Jinja2Templates
from starlette.staticfiles import StaticFiles
from starlette.responses import RedirectResponse
from typing import List
from fastapi import FastAPI, File, UploadFile
from starlette.responses import HTMLResponse
import uvicorn
import requests
from lxml import etree
import json
# FastAPI application: a minimal search front-end for ETSI TS documents.
app = FastAPI()
#location='static'
#app.mount('/static', StaticFiles(directory=location), name='static')
# Static HTML for the landing page: a logo linking to the ETSI TS index and a
# single-field form that submits GET /search?q=<document number>.
# NOTE(review): the page links /static/mystyle.css, but the static-files mount
# above is commented out — the stylesheet will 404 unless served elsewhere.
search0='''
<html>
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
  <title></title>
  <link rel="stylesheet" href="">
  <link rel="stylesheet" type="text/css" href="/static/mystyle.css">


</head>
<body>
<div id="lg" class="s-p-top">
<center><a href="https://www.etsi.org/deliver/etsi_ts"><img hidefocus="true" id="s_lg_img_new" class="index-logo-srcnew" src="//www.etsi.org/templates/etsi/img/logo.svg" width="270" height="129" onerror="this.src='//www.baidu.com/img/flexible/logo/pc/index@2.png';this.onerror=null;" usemap="#mp">
</a></center>


<center>
<form class="layui-form" action="search" text-aglin:center>
  <div class="layui-form-item">
  <div class="layui-inline" >
    <label class="layui-form-label"></label>
    <div class="layui-form-item">
     <input type="text" name="q" required  lay-verify="required" placeholder="" autocomplete="off" class="layui-input-inline" style="text-align: center;background-color: #f3fdff;width: 520px;height: 40px;">
     <button class="layui-btn layui-btn-normal" lay-submit lay-filter="formDemo">文档编号</button>
    </div>
    </div>

  </div>

  </form>
  </center>

  </body>
  </html>
'''
# Result wrapper appended below the search page; the {} placeholder is filled
# with the result link plus the query-history links via str.format.
result0='''
<html>
<body>
{}
</body>
</html>
'''
@app.get("/")
def root():
    """Redirect the site root to the search page."""
    return RedirectResponse(url='/search')

@app.get("/search")
def search(q: str = None):
    """Serve the landing page, or look up an ETSI TS document number.

    With no ``q`` the static search page is returned.  Otherwise the query is
    appended to the on-disk JSON history (``data.json``), the ETSI directory
    tree is walked for a matching document, and a result page is rendered with
    the match on top and the history (newest first) below it.
    """
    print('q is ', q, type(q))
    if q is None:
        return HTMLResponse(content=search0)

    # Bound before the try so the except path below can never hit a NameError
    # (previously `results` was only assigned inside the try, so an early
    # failure crashed the history-rendering loop).
    results = []
    try:
        # Load existing history; a missing or corrupt file starts fresh.
        try:
            with open('data.json', 'r') as file:
                data = json.load(file)
        except (json.JSONDecodeError, FileNotFoundError):
            data = []

        # Append the new query and persist the history.
        data.append({'data': q})
        with open('data.json', 'w') as file:
            json.dump(data, file, indent=4)

        # Newest queries first for display.  (No need to re-read the file we
        # just wrote — `data` is its exact content.)
        results = [item['data'] for item in data][::-1]
        print(results)

        filename, link = first_level(mainlink0, int(q))
        print('try', filename, link)
    except Exception:
        # Lookup failed (non-numeric query, network error, no match, ...):
        # fall back to a "not found" entry linking to the ETSI index.
        filename, link = '查无此编号文件', mainlink0
        print('except', filename, link)

    # Result link with the query highlighted in red inside the filename.
    newlist1 = '<p align="center"><a href="{}">{}</a></p>'.format(
        link, filename.replace(q, '<font size="3" color="red">' + q + '</font>'))

    # Previous queries rendered as clickable re-search links.
    history = ''
    for item in results:
        history = history + '<p align="center"><a href="search?q={}">{}</a></p>'.format(item, item)

    return HTMLResponse(content=search0 + result0.format(newlist1 + history))

def first_level0(link, y):
    """Resolve document number *y* by fetching the ETSI top-level index.

    Legacy, non-caching variant of :func:`first_level` (not used by the app).
    Fetches the index, finds the ``min_max`` range directory containing the
    series number, and descends via :func:`third_level`.  Returns whatever
    the lower levels return, or None when no range matches.
    """
    response = requests.get(link, proxies=proxies, timeout=4, verify=False)
    tree = etree.HTML(response.text)
    hrefs = tree.xpath("//@href")

    # A full document number may carry a 2- or 4-digit version suffix; strip
    # it to get the base series number used for range matching, but keep the
    # full number (y0) for building the final URL.
    y0 = y
    if len(str(y)) == 8:
        print('here y0', y0)
        y = y // 100
    elif len(str(y)) == 10:
        y = y // 10000
    print('start search', y, y0)

    # Each href's last directory component encodes a "min_max" number range;
    # descend into the first range that contains the series number.
    # (hrefs[0] is skipped: parent-directory link.)
    for x in hrefs[1:]:
        lo, hi = x.split('/')[-2].split('_')[:2]
        if int(hi) >= y >= int(lo):
            print(y0, site + x + str(y0))
            return third_level(site + x + str(y0))
    # Falls through (returns None) when no range matches.

def first_level(link, y):
    """Resolve document number *y* against the (cached) ETSI top-level index.

    The index page is fetched once and cached in ``a.html``; subsequent calls
    reuse the cache.  Finds the ``min_max`` range directory containing the
    series number and descends via :func:`third_level`.  Returns whatever the
    lower levels return, or None when no range matches.
    """
    # Fetch the index once and cache it locally.  The cache is written/read
    # as UTF-8 explicitly so the result does not depend on the platform's
    # locale encoding.
    if not os.path.exists('a.html'):
        response = requests.get(link, proxies=proxies, timeout=4, verify=False)
        with open('a.html', 'w', encoding='utf-8') as f:
            f.write(response.text)
    with open('a.html', 'r', encoding='utf-8') as f:
        text = f.read()
    tree = etree.HTML(text)
    hrefs = tree.xpath("//@href")

    # A full document number may carry a 2- or 4-digit version suffix; strip
    # it to get the base series number used for range matching, but keep the
    # full number (y0) for building the final URL.
    y0 = y
    if len(str(y)) == 8:
        print('here y0', y0)
        y = y // 100
    elif len(str(y)) == 10:
        y = y // 10000
    print('start search', y, y0)

    # Each href's last directory component encodes a "min_max" number range;
    # descend into the first range that contains the series number.
    # (hrefs[0] is skipped: parent-directory link.)
    for x in hrefs[1:]:
        lo, hi = x.split('/')[-2].split('_')[:2]
        if int(hi) >= y >= int(lo):
            print(y0, site + x + str(y0))
            return third_level(site + x + str(y0))
    # No matching range: returns None (the caller's except path handles it).
# Third-level directory: crawl all links and keep only the newest version.
def third_level(link):
    """Fetch a document's version directory and descend into the newest one.

    Uses the module-level shared session ``s``.  Returns the
    ``(filename, url)`` pair from :func:`fourth_level`.
    """
    response = s.get(link, proxies=proxies, timeout=4, verify=False)
    tree = etree.HTML(response.text)
    hrefs = tree.xpath("//@href")
    # The last href is taken as the newest version.  NOTE(review): this
    # assumes the server lists version directories in ascending order —
    # confirm against the actual listing.
    print('link', site + hrefs[-1])
    return fourth_level(site + hrefs[-1])

# Fourth-level directory: pick the document/zip link inside the version dir.
def fourth_level(link):
    """Return ``(filename, url)`` for the first entry of a version directory.

    Uses the module-level shared session ``s``.  Returns None when the
    listing holds nothing beyond the parent-directory link.
    """
    response = s.get(link, proxies=proxies, timeout=4, verify=False)
    tree = etree.HTML(response.text)
    hrefs = tree.xpath("//@href")
    # The original loop returned unconditionally on its first iteration, so
    # only hrefs[1] (the entry after the parent-directory link) was ever
    # used; make that explicit with a guard clause.
    if len(hrefs) > 1:
        x = hrefs[1]
        name = x.split('/')[-1]
        print(name, site + x)
        return name, site + x
    # Empty listing: implicit None (the caller's except path handles it).
if __name__ == "__main__":
    # Root of the crawl: the ETSI Technical Specifications index.
    # NOTE(review): these globals (mainlink0, site, proxies, s) are only
    # bound when the file runs as a script; importing this module and hitting
    # /search would raise NameError inside the crawl helpers — consider
    # moving them to module level.
    mainlink0='https://www.etsi.org/deliver/etsi_ts'
    site='https://www.etsi.org'
    # Empty proxy map: requests go out directly.
    proxies={

            }
    # Shared HTTP session reused by third_level/fourth_level.
    # NOTE(review): all requests use verify=False (TLS certificate
    # verification disabled) — security risk; confirm this is intentional.
    s = requests.Session()
    uvicorn.run(app, host="0.0.0.0",port=83,log_level="info")
