# coding=utf-8
# dependencies (pip install):
# beautifulsoup4 lxml cssutils
from typing import List
import urllib.request
import re
import json
from bs4 import BeautifulSoup

# Site root; not referenced elsewhere in this file.
domainUrl = 'https://www.plazmaburst2.com/'
# Ranking pages, one per game mode; the `_sect` query parameter selects the category.
url = {
    'Single-player': "https://www.plazmaburst2.com/?s=1&a=&_sect=1",
    'Cooperative': "https://www.plazmaburst2.com/?s=1&a=&_sect=2",
    'Deathmatch': 'https://www.plazmaburst2.com/?s=1&a=&_sect=3',
}
# Front page that contains the highlighted ("featured") map box.
highlightedUrl = "https://www.plazmaburst2.com/?s=1&a="


def getLocalHtml():
    """Return the contents of the locally cached page ``test.html``.

    Offline alternative to :func:`getHtml` for development/testing.

    Returns:
        str: the full HTML text of ``test.html``.
    """
    # `with` guarantees the handle is closed even on error; a single read()
    # replaces the original quadratic line-by-line string concatenation.
    with open('test.html', 'r', encoding='utf-8') as fh:
        return fh.read()


def getHtml(url):
    """Fetch *url* with browser-like headers and return the open response.

    The return value is the file-like ``http.client.HTTPResponse`` from
    ``urlopen``; callers hand it straight to BeautifulSoup, which reads it.

    Args:
        url (str): absolute URL to request.

    Returns:
        http.client.HTTPResponse: the open response stream.
    """
    headers = {
        'Connection': 'keep-alive',
        # Stray leading spaces removed from the original values — header
        # values should not carry incidental whitespace.
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        # NOTE(review): hard-coded guest credentials — confirm these are
        # intentionally committed.
        'Cookie': 'login=Guest_01371;password=aef8a1cf00e4549ddfac544e26b75e01',
    }
    request = urllib.request.Request(url=url, headers=headers)
    return urllib.request.urlopen(request)


def getData(html):
    """Parse a map-listing page into the "best" and "new" rankings.

    Args:
        html: HTML text or a file-like response, as accepted by BeautifulSoup.

    Returns:
        list[dict]: two entries, ``{'id': 0, 'name': 'best', 'data': [...]}``
        and ``{'id': 1, 'name': 'new', 'data': [...]}``; each ``data`` item
        is a dict describing one map row (url, name, author, image, id,
        approval flag, rating).
    """
    soup = BeautifulSoup(html, 'html.parser')
    # All ranking rows live in the .sg table under #content_header_block.
    data = soup.find(id='content_header_block').select('.bb .sg tr')
    # "New popular maps": skip the first 4 rows (search box, header,
    # column titles), then take 20 entries.
    dataNew = data[4:4 + 20]
    # "Best maps": additionally skip 2 separator rows, then take 40 entries.
    dataBest = data[4 + 20 + 2:4 + 20 + 2 + 40]

    # Strip newlines plus the stray "amp;" left by entity re-escaping,
    # so the regexes below can match across the raw tag markup.
    def getRowStr(data):
        return str(data).replace("\n", "").replace("amp;", "")

    # Patterns applied to the *string form* of each <td> cell.
    regex = {
        'map': '<a href="(.*?)">(.*?)</a> by <a href="(.*?)">(.*?)</a>',
        'mapImgUrl': 'background-image:url[(](.*?)[)];background-position',
        'mapId': "Map ID:','(.*?)'",
        'mapRate': "Rated: (.*?) of (.*?)",
    }

    def getResult(rows):
        """Convert a list of <tr> tags into a list of map dicts."""
        result = []
        for item in rows:
            cells = item.select('td')
            # Match the "<a>map</a> by <a>author</a>" cell once instead of
            # re-running the same search for every captured group.
            mapMatch = re.search(regex['map'], getRowStr(cells[1]))
            row = {
                'id': str(cells[0].get_text()).replace("\xa0", "").replace("\n", "").replace(".", "").strip(),
                'mapUrl': mapMatch.group(1).strip(),
                'mapName': mapMatch.group(2).strip().replace("\n", ""),
                'mapAuthorUrl': mapMatch.group(3).strip(),
                'mapAuthor': mapMatch.group(4).strip(),
                'mapImgUrl': '',  # filled in below when an inline style exists
                'description': str(cells[2].get_text()).replace("\xa0", "").replace("\n", "").replace(".", "").strip(),
                # The map ID is embedded in an inline-JS argument list.
                'mapId': re.search(regex['mapId'], getRowStr(cells[3])).group(1).strip(),
                # True only for maps marked "Approved, out of rotation".
                'mapApprove': "Approved, out of rotation" in str(cells[4]),
                'mapRate': float(re.search(regex['mapRate'], getRowStr(cells[5])).group(1).strip()),
            }
            # Thumbnail URL comes from the cell's inline style; rows without
            # an image keep mapImgUrl == ''.
            mapImgStyle = re.search(regex['mapImgUrl'], cells[1]['style'])
            if mapImgStyle is not None:
                row['mapImgUrl'] = mapImgStyle.group(1)
            result.append(row)
        return result

    return [
        {'id': 0, 'name': "best", 'data': getResult(dataBest)},
        {'id': 1, 'name': "new", 'data': getResult(dataNew)},
    ]


def getHighlightedData(html):
    """Parse the front page's highlighted ("featured") map box.

    Args:
        html: HTML text or a file-like response, as accepted by BeautifulSoup.

    Returns:
        dict: mapUrl/mapName/mapAuthorUrl/mapAuthor/mapImgUrl/mapId for the
        featured map. ``mapAuthorUrl`` is always '' — this markup does not
        expose an author link.
    """
    soup = BeautifulSoup(html, 'html.parser')
    # The featured map is the first div inside the td.bb cell.
    data = soup.find(id='content_header_block').select('td.bb>div')
    item = data[0]
    regex = {
        'mapImgUrl': 'background-image:url[(](.*?)[)];background-position',
        'mapId': "Map ID:','(.*?)'"
    }
    result = {
        'mapUrl': item.find('a').get('href'),
        'mapName': item.select('span>span')[0].get_text(),
        'mapAuthorUrl': '',
        'mapAuthor': item.select('span>span')[1].get_text(),
        'mapImgUrl': '',  # filled in below when an inline style exists
        # The map ID is embedded in an inline-JS argument list.
        'mapId': re.search(
            regex['mapId'],
            str(item.select('span'))
        ).group(1),
    }
    # Thumbnail URL from the first span's inline style; may be absent.
    mapImgStyle = re.search(
        regex['mapImgUrl'],
        item.select('span')[0].get('style')
    )
    if mapImgStyle is not None:
        result['mapImgUrl'] = mapImgStyle.group(1)
    return result


def writeData():
    """Crawl every category in ``url`` and dump all rankings as JSON.

    Writes ``custom-map-result.json`` in the current directory.

    Returns:
        bool: always True (kept for backward compatibility).
    """
    data = []
    for index, name in enumerate(url):
        print("now crawling: ", name)
        html = getHtml(url[name])
        data.append({
            'id': index,
            'name': name,
            'data': getData(html),
        })
    # `with` closes the file even if serialization raises; ensure_ascii=False
    # keeps non-ASCII map names readable in the utf-8 output file.
    with open('custom-map-result.json', 'w', encoding='utf-8') as fh:
        fh.write(json.dumps(data, ensure_ascii=False))
    return True


def writeHighlightedData():
    """Crawl the highlighted map and dump it as JSON.

    Writes ``custom-map-high-result.json`` in the current directory.

    Returns:
        bool: always True (kept for backward compatibility).
    """
    print("now crawling:  Highlighted")
    highHtml = getHtml(highlightedUrl)
    highData = getHighlightedData(highHtml)
    # `with` closes the file even if serialization raises; ensure_ascii=False
    # keeps non-ASCII text readable in the utf-8 output file.
    with open('custom-map-high-result.json', 'w', encoding='utf-8') as fh:
        fh.write(json.dumps(highData, ensure_ascii=False))
    return True


# Entry point: crawl the per-category rankings, then the highlighted map.
if __name__ == "__main__":
    writeData()
    writeHighlightedData()
    # Redundant `exit(0)` removed: `exit` is the interactive helper from the
    # `site` module, and falling off the end of the script already exits
    # with status 0.
