import base64
import gzip
import io
import json
import re
import urllib
import urllib.error
import urllib.parse
import urllib.request
from html.parser import HTMLParser

from bs4 import BeautifulSoup
from flask import Flask, request
from flask import make_response
from flask_cors import CORS
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options

from routes.WorkspaceRoute import workspaceRoute
from routes.BookshelfRoute import bookshelfRoute
from routes.DatabookRoute import databookRoute
from routes.spider.WebSpiderRoute import webSpiderRoute

# Flask application setup: keep non-ASCII characters in JSON responses
# (ensure_ascii=False) and allow cross-origin requests on every route.
app = Flask(__name__)
app.config.update(RESTFUL_JSON=dict(ensure_ascii=False))
CORS(app, resources=r'/*')

@app.route('/')
def index():
    """Root endpoint: logs the optional 'path' query parameter and returns a fixed marker string."""
    requested_path = request.args.get("path")
    print(requested_path)
    return "it's a directory"

## @Deprecated
@app.route('/get-chess')
def getChess():
    """Deprecated stub: reads the 'path' query parameter but always returns a fixed marker."""
    _path = request.args.get("path")

    return "getChess"

## @Deprecated
@app.route('/img/upload', methods=['POST'])
def send_img():
    """Deprecated: echoes back the raw bytes of the uploaded 'file' form field."""
    uploaded = request.files['file']
    return uploaded.read()

@app.route('/webview/proxy', methods=['GET', 'POST', 'HEAD', 'OPTIONS', 'PUT', 'DELETE', 'TRACE', 'CONNECT'])
def webview_proxy():
    """Forward proxy for the embedded webview.

    The target address arrives in the 'url' query parameter as a
    base64-encoded, URL-quoted string (the optional 'Referer' parameter is
    encoded the same way). The upstream response is relayed back to the
    client; HTML/CSS/JS bodies are rewritten so embedded references keep
    routing through this endpoint.
    """
    currentRootUrl = request.url_root   # e.g. http://host:port/ — used to locate static/js/hook.js
    currentUrl = request.base_url       # this endpoint's own URL; rewritten resources point back here
    bodyData = request.stream.read()    # raw request body, forwarded verbatim upstream
    method = request.method

    # Decode the proxied target URL (URL-quoted, then base64-wrapped by the client).
    url = request.args.get("url")
    url = str(base64.b64decode(url), "utf-8")
    url = urllib.parse.unquote(url)
    # print(url)

    # Optional Referer, encoded the same way as 'url'.
    Referer = request.args.get("Referer")
    if Referer:
        Referer = str(base64.b64decode(Referer), "utf-8")
        Referer = urllib.parse.unquote(Referer)
    # print(Referer)

    urlParsedResult = urllib.parse.urlparse(url)

    # Forward a minimal set of client headers, with desktop-Chrome fallbacks.
    headers = request.headers
    UserAgent = headers.get('User-Agent')
    AcceptLanguage = headers.get('Accept-Language')
    Connection = headers.get('Connection')
    ContentType = headers.get('Content-Type')

    UserAgent = UserAgent if UserAgent else 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'
    AcceptLanguage = AcceptLanguage if AcceptLanguage else 'zh-CN,zh;q=0.9'
    Connection = Connection if Connection else 'keep-alive'
    # targetUrl = urlParsedResult.scheme + "://" + urlParsedResult.netloc

    nHeaders = {
        'User-Agent': UserAgent,
        'Accept-Language': AcceptLanguage,
        'Connection': Connection
    }

    if Referer:
        nHeaders['Referer'] = Referer

    if ContentType:
        nHeaders['Content-Type'] = ContentType

    '''
    import http.client

    page = urllib.parse.urlencode({})
    params = urllib.parse.urlencode({})

    conn = http.client.HTTPConnection(urlParsedResult.netloc, timeout=10)
    conn.request("GET", urlParsedResult.path, urlParsedResult.params, nHeaders)

    r1 = conn.getresponse()
    webview_stream = r1.read()
    print(webview_stream)
    webview_data = base64.b64encode(webview_stream).decode('ascii')

    conn.close()

    return u"data:image/png;base64," + webview_data
    '''

    httpRequestType = 1 # 1 - _HttpRequest , 2 - _ChromeHttpRequest
    if httpRequestType == 1 :
        response = _HttpRequest(url, nHeaders, bodyData, method)
    else:
        response = _ChromeHttpRequest(url, nHeaders)

    code = response['code'] # upstream status code
    info = response['headers'] # upstream response headers
    data = response['data'] # upstream body

    # Rewrite textual payloads so embedded URLs stay proxied; only the
    # urllib path (type 1) returns real upstream headers to dispatch on.
    if httpRequestType == 1 :
        if 'Content-Type' in info:
            respContentType = info['Content-Type']
            if 'text/html' in respContentType :
                data = _handleHtml(data, urlParsedResult, currentRootUrl, currentUrl)
                # data = _handleCss(bytes(data, encoding = "utf8"), urlParsedResult, currentRootUrl, currentUrl) # TODO may clash with js code containing url(xxx)
            if 'text/css' in respContentType :
                data = _handleCss(data, urlParsedResult, currentRootUrl, currentUrl)
            if 'javascript' in respContentType : # text/javascript 、 application/x-javascript 、 application/javascript
                data = _handleJavascript(data, urlParsedResult, currentRootUrl, currentUrl)

    resp = make_response(data) # response body
    resp.status = code # status code

    resp.headers['A-ORIGIN-URL'] = url

    # Relay upstream headers, dropping the ones that would break the proxy:
    # X-Frame-Options / CSP would block embedding, and Content-Encoding /
    # Content-Length no longer match the (decompressed, rewritten) body.
    for key in info:
        if (key.lower() != 'X-Frame-Options'.lower() 
            and key.lower() != 'content-security-policy'.lower() 
            and key.lower() != 'Content-Encoding'.lower() 
            and key.lower() != 'Content-Length'.lower()):
            resp.headers[key] = info[key] # headers are assigned dict-style

    return resp

@app.route('/screen/capture', methods=['GET'])
def screen_capture():
    """Render a full-page screenshot of a URL with headless Chrome.

    The 'url' query parameter is a base64-encoded, URL-quoted address.
    Returns an HTML page embedding the screenshot as a base64 data URI.

    Fix: the driver is now released in a ``finally`` block with ``quit()``
    (the old code called ``close()`` only on success, leaking the
    chromedriver process whenever page load or the screenshot failed).
    """
    url = request.args.get("url")
    url = str(base64.b64decode(url), "utf-8")
    url = urllib.parse.unquote(url)
    # print(url)

    chrome_options = Options()
    chrome_options.add_argument("--headless") # avoids the DevToolsActivePort-file-missing error
    chrome_options.add_argument('--disable-gpu') # works around a Chrome bug per Google docs
    chrome_options.add_argument('--no-sandbox') # required for headless mode in containers
    chrome_options.add_argument('window-size=1440x900') # initial browser resolution
    chrome_options.add_argument('--hide-scrollbars') # hide scrollbars on pages that show them
    # chrome_options.add_argument('blink-settings=imagesEnabled=false') # skip images for speed

    import platform
    sysstr = platform.system()
    if sysstr == "Windows":
        driverPath = './sdk/chromedriver/win32/91.0.4472.19/chromedriver.exe'
    elif sysstr == "Linux":
        driverPath = './sdk/chromedriver/linux/91.0.4472.19/chromedriver'
    else:
        driverPath = ''

    driver = Chrome(executable_path = driverPath, options = chrome_options) # headless browser instance
    try:
        driver.get(url)

        # Measure the full document size via JS, then resize the window so a
        # single screenshot captures the entire page.
        width = driver.execute_script("return document.documentElement.scrollWidth")
        height = driver.execute_script("return document.documentElement.scrollHeight")
        print(width,height)

        driver.set_window_size(width, height)

        screenshotBase64 = driver.get_screenshot_as_base64()
    finally:
        # quit() (not close()) ends the whole session so the chromedriver
        # process is released even when the page load or screenshot fails.
        driver.quit()

    html = '<img src="data:image/png;base64,' + screenshotBase64 + '">'
    resp = make_response(html) # response body
    resp.status = 200 # status code

    resp.headers['Content-Type'] = 'text/html; charset=utf-8'

    return resp

@app.route('/mhtml/transfor', methods=['GET'])
def mhtml_transfor():
    """Snapshot a URL as an MHTML archive and return it as a download.

    The 'url' query parameter is a base64-encoded, URL-quoted address.

    Fix: ``driver.quit()`` now runs in a ``finally`` block so the
    chromedriver process is released even when page load or the CDP
    snapshot command fails (the old code leaked the driver on error).
    """
    url = request.args.get("url")
    url = str(base64.b64decode(url), "utf-8")
    url = urllib.parse.unquote(url)
    # print(url)

    chrome_options = Options()
    chrome_options.add_argument("--headless") # avoids the DevToolsActivePort-file-missing error
    chrome_options.add_argument('--disable-gpu') # works around a Chrome bug per Google docs
    chrome_options.add_argument('--no-sandbox') # required for headless mode in containers
    chrome_options.add_argument('window-size=1440x900') # initial browser resolution
    chrome_options.add_argument('--hide-scrollbars') # hide scrollbars on pages that show them
    # chrome_options.add_argument('blink-settings=imagesEnabled=false') # skip images for speed

    import platform
    sysstr = platform.system()
    if sysstr == "Windows":
        driverPath = './sdk/chromedriver/win32/91.0.4472.19/chromedriver.exe'
    elif sysstr == "Linux":
        driverPath = './sdk/chromedriver/linux/91.0.4472.19/chromedriver'
    else:
        driverPath = ''

    driver = Chrome(executable_path = driverPath, options = chrome_options) # headless browser instance
    try:
        driver.get(url)

        # Chrome DevTools Protocol command: capture the page as MHTML.
        res = driver.execute_cdp_cmd('Page.captureSnapshot', {})
    finally:
        driver.quit()

    resp = make_response(res['data']) # response body
    resp.status = 200 # status code

    filename = 'index'
    resp.headers['Content-Type'] = 'application/octet-stream'
    resp.headers["Content-disposition"] = 'attachment; filename=%s.mhtml' % filename

    return resp

def _HttpRequest(url, headers, bodyData, method):
    """Perform an HTTP request with urllib and normalize the result.

    Returns ``{'code': status, 'headers': header mapping, 'data': body}``.

    Fix: 4xx/5xx upstream responses raise ``urllib.error.HTTPError``, which
    the old code caught generically and masked as a '500' stub, losing the
    real status, headers and error page. ``HTTPError`` is itself a response
    object, so it is now forwarded as-is; only transport-level failures
    (DNS, connection refused, ...) fall back to the 500 stub.
    """
    req = urllib.request.Request(url, headers=headers, data=bodyData, method=method)

    try:
        try:
            response = urllib.request.urlopen(req)
        except urllib.error.HTTPError as httpErr:
            # Keep the upstream error response so the proxy relays it unchanged.
            response = httpErr

        code = response.getcode() # upstream status code
        info = response.info()    # response headers (case-insensitive mapping)
        data = response.read()    # raw body bytes

        # Transparently decompress gzip bodies; the caller strips the
        # Content-Encoding header before relaying, so clients see plain data.
        # info is an email.message-style mapping, so get() is case-insensitive.
        if (info.get('Content-Encoding') or '').lower() == 'gzip':
            data = gzip.decompress(data)

        return {'code': code, 'headers': info, 'data': data}
    except Exception as e:
        # TODO log details and handle the distinct error types separately
        print(e)
        return {'code': '500', 'headers': {}, 'data': "Error {0}".format(str(e))}

def _ChromeHttpRequest(url, headers):
    """Fetch a URL with headless Chrome and return the rendered page source.

    ``headers`` is accepted for signature parity with ``_HttpRequest`` but is
    not applied (plain Selenium cannot set request headers). Returns the same
    ``{'code', 'headers', 'data'}`` dict shape as ``_HttpRequest``; the real
    upstream status/headers are unavailable, so stubs are returned.

    Fix: the driver is now released in a ``finally`` block with ``quit()``
    (the old code called ``close()`` only on success, leaking the
    chromedriver process whenever the page load failed).
    """
    chrome_options = Options()
    chrome_options.add_argument("--headless") # avoids the DevToolsActivePort-file-missing error
    chrome_options.add_argument('--disable-gpu') # works around a Chrome bug per Google docs
    chrome_options.add_argument('--no-sandbox') # required for headless mode in containers
    chrome_options.add_argument('window-size=1920x3000') # initial browser resolution
    chrome_options.add_argument('--hide-scrollbars') # hide scrollbars on pages that show them
    chrome_options.add_argument('blink-settings=imagesEnabled=false') # skip images for speed

    driver = Chrome(executable_path = './sdk/chromedriver/linux/91.0.4472.19/chromedriver', options = chrome_options)
    try:
        driver.get(url)
        htmlStr = driver.page_source # rendered HTML after JS execution
    finally:
        driver.quit()

    return {'code': '200', 'headers': {}, 'data': htmlStr}

def _handleHtml(data, urlParsedResult, currentRootUrl, webviewProxyUrl):
    """Rewrite an HTML response body so its resources load through the proxy.

    Delegates to the BeautifulSoup-based rewriter; an older HTMLParser-based
    path (WebviewProxyHttpParser) exists but is no longer used here.
    """
    return _WebviewProxyBeautifulSoup4(data, urlParsedResult, currentRootUrl, webviewProxyUrl)

def _handleCss(data, urlParsedResult, currentRootUrl, webviewProxyUrl):
    """Rewrite every ``url(...)`` reference in a CSS payload to go through the proxy.

    ``data`` is the raw response body (bytes, assumed UTF-8); returns the
    rewritten stylesheet as a str.

    Fix: the regex is now a raw string — the old ``'url\\s*\\(...'`` plain
    literal relied on the invalid-escape fallback (DeprecationWarning today,
    a SyntaxError in future Python versions).
    """
    def _handleCssReplaceUrl(matchobj):
        # Strip the url( ... ) wrapper plus any quotes/whitespace to get the bare URL.
        matchStr = matchobj.group(0)
        url = matchStr.replace(' ', '').replace("'", '').replace('"', '').replace('url(', '').replace(')', '')
        nUrl = _WebviewUrlCompletion(url, urlParsedResult, webviewProxyUrl)
        return 'url(' + nUrl + ')'

    # Matches url(JPEG/JPG/PNG/GIF/...) references, optionally quoted.
    return re.sub(r'url\s*\([^()]+\)', _handleCssReplaceUrl, str(data, "utf-8"))

def _handleJavascript(data, urlParsedResult, currentRootUrl, webviewProxyUrl):
    #知乎相关页面处理
    '''
    # data = data.replace(bytes('"root"', encoding = "utf8"), bytes('"rootroot"', encoding = "utf8"))
    # data = bytes('', encoding = "utf8")
    if (urlParsedResult.path == '/heifetz/main.topstory-routes.943438755e4084c01815.js'):
        data = data.replace(bytes('.userType', encoding = "utf8"), bytes('', encoding = "utf8"))
    '''
    '''
    data = data.replace(bytes('window.location', encoding = "utf8"), bytes('window.locatioon', encoding = "utf8"))
    data = data.replace(bytes('document.location', encoding = "utf8"), bytes('document.locatioon', encoding = "utf8"))
    data = data.replace(bytes('location.hash', encoding = "utf8"), bytes('locatioon.hash', encoding = "utf8"))
    data = data.replace(bytes('location.host', encoding = "utf8"), bytes('locatioon.host', encoding = "utf8"))
    data = data.replace(bytes('location.hostname', encoding = "utf8"), bytes('locatioon.hostname', encoding = "utf8"))
    data = data.replace(bytes('location.href', encoding = "utf8"), bytes('locatioon.href', encoding = "utf8"))
    data = data.replace(bytes('location.pathname', encoding = "utf8"), bytes('locatioon.pathname', encoding = "utf8"))
    data = data.replace(bytes('location.port', encoding = "utf8"), bytes('locatioon.port', encoding = "utf8"))
    data = data.replace(bytes('location.protocol', encoding = "utf8"), bytes('locatioon.protocol', encoding = "utf8"))
    data = data.replace(bytes('location.search', encoding = "utf8"), bytes('locatioon.search', encoding = "utf8"))
    data = data.replace(bytes('location.assign(', encoding = "utf8"), bytes('locatioon.assign(', encoding = "utf8"))
    data = data.replace(bytes('location.reload(', encoding = "utf8"), bytes('locatioon.reload(', encoding = "utf8"))
    data = data.replace(bytes('location.replace(', encoding = "utf8"), bytes('locatioon.replace(', encoding = "utf8"))
    '''

    data = data.replace(bytes('location', encoding = "utf8"), bytes('locatioon', encoding = "utf8"))
    data = data.replace(bytes('document.referrer', encoding = "utf8"), bytes('document.referreer', encoding = "utf8"))
    data = data.replace(bytes('document.domain', encoding = "utf8"), bytes('document.domaiin', encoding = "utf8"))
    data = data.replace(bytes('history.pushState', encoding = "utf8"), bytes('historry.pushState', encoding = "utf8"))

    return data

def _WebviewProxyBeautifulSoup4(data, urlParsedResult, currentRootUrl, webviewProxyUrl):
    """Rewrite an HTML document so its resources load through the proxy.

    Rewrites ``link[href]``, ``script[src]`` and ``form[action]`` via
    _WebviewUrlCompletion, then injects two <script> tags at the start of
    <head>: an inline one exposing the proxied page's parsed origin as
    ``urlParsed``, and ``static/js/hook.js`` which patches browser APIs on
    the client side. Returns the prettified HTML as a str.

    Fix: injection now uses ``Tag.insert(0, ...)``, which also works when
    <head> is empty — the old ``insert_before`` chain dereferenced
    ``head.contents[0]`` and raised IndexError on an empty <head>.
    """
    sp = BeautifulSoup(data, 'html.parser')

    # Tags whose URL attribute must be rerouted through the proxy.
    # (<a>, <img> and <style> rewriting was tried and intentionally left out.)
    for tagName, attrName in (('link', 'href'), ('script', 'src'), ('form', 'action')):
        for tag in sp.findAll(tagName):
            if attrName in tag.attrs:
                tag[attrName] = _WebviewUrlCompletion(tag[attrName], urlParsedResult, webviewProxyUrl)

    head = sp.head
    if head:
        # Serialize the proxied page's origin for hook.js, base64-wrapped so
        # it survives HTML escaping inside the inline script.
        urlParsedBase64 = base64.b64encode(bytes(json.dumps({
            'scheme': urlParsedResult.scheme,
            'netloc': urlParsedResult.netloc,
            'path': urlParsedResult.path,
            'params': urlParsedResult.params,
            'query': urlParsedResult.query,
            'fragment': urlParsedResult.fragment,
            'webviewProxyUrl': webviewProxyUrl
        }), encoding = "utf8"))
        urlParsedBase64 = str(urlParsedBase64, encoding = "utf8")

        new_innerjs_tag = sp.new_tag('script')
        new_innerjs_tag.append(
            'var urlParsed = "' + urlParsedBase64 + '"\n' +
            'urlParsed = JSON.parse(atob(urlParsed))'
        )

        new_hookjs_tag = sp.new_tag('script', type = 'text/javascript', src = currentRootUrl + 'static/js/hook.js')

        # Prepend so the final order inside <head> is [innerjs, hookjs, ...],
        # matching the original non-empty-head behavior.
        head.insert(0, new_hookjs_tag)
        head.insert(0, new_innerjs_tag)

    return sp.prettify()

def _WebviewUrlCompletion(url, urlParsedResult, webviewProxyUrl):
    # print(urlParsedResult.scheme)
    # print(urlParsedResult.netloc)
    # print(urlParsedResult.path)

    nURl = url

    # data:image 、 https:// 、 http:// 、 javascript: 、 #
    if url and url.startswith('data:image') == False and url.startswith('https://') == False and url.startswith('http://') == False and url.startswith('javascript:') == False and url.startswith('#') == False :
        if url.startswith('//'):
            nURl = urlParsedResult.scheme + ":" + url
        elif url.startswith('/'):
            nURl = urlParsedResult.scheme + "://" + urlParsedResult.netloc + url
        else:
            nURl = urlParsedResult.scheme + "://" + urlParsedResult.netloc + urlParsedResult.path + '/../' + url

    if nURl.startswith('https://') or nURl.startswith('http://'):
        nUrlBase64 = base64.b64encode(bytes(urllib.parse.quote(nURl), encoding = "utf8"))
        Referer = base64.b64encode(bytes(urllib.parse.quote(urlParsedResult.scheme + "://" + urlParsedResult.netloc), encoding = "utf8"))
        nURl = webviewProxyUrl + '?url=' + str(nUrlBase64, encoding = "utf8") + '&Referer=' + str(Referer, encoding = "utf8")

    return nURl

class WebviewProxyHttpParser(HTMLParser):
    """HTML parser that collects the distinct tag names seen in a document.

    Feed it markup via ``feed()`` and read the tag names back with
    ``getTags()``. Tag attributes are ignored.
    """

    def __init__(self):
        super().__init__()
        # tag name -> tag name; a dict is used as an insertion-ordered set
        self.tags = {}

    def getTags(self):
        """Return a view over every distinct tag name encountered so far."""
        return self.tags.keys()

    def handle_starttag(self, tag, attrs):
        # Record the tag name only; attributes are irrelevant for this collector.
        self.tags.setdefault(tag, tag)

# Mount the feature blueprints (workspace, bookshelf, databook, web spider).
app.register_blueprint(workspaceRoute)
app.register_blueprint(bookshelfRoute)
app.register_blueprint(databookRoute)
app.register_blueprint(webSpiderRoute)

if __name__ == '__main__':
    # Listen on all interfaces (Flask's default port 5000).
    app.run(host="0.0.0.0")