from urllib import request
import re

# NOTE: BeautifulSoup or scrapy would extract page content more robustly than raw regex.
'''
  Plan:
    - Identify the data we want and the page that serves it
    - Inspect the page structure to locate the tags holding the data
    - Send an HTTP request to the server and parse the HTML it returns
'''
class Spider():
  """Scrape the egame.qq.com live list and print anchors ranked by popularity.

  Pipeline (see ``go``): fetch HTML -> regex-extract anchor cards ->
  refine match lists into strings -> sort by viewer count -> print.
  """
  url = 'https://egame.qq.com/livelist'
  # Regexes over the raw HTML: the outer anchor card, then the streamer
  # name and the viewer-count span inside each card.
  # Raw strings avoid invalid-escape DeprecationWarnings; pattern text unchanged.
  root_pattern = r'<div class="info-anchor" .*?>([\s\S]*?)<\/div>'
  name_pattern = r'<p class="name" .*?>([\s\S]*?)<\/p>'
  hot_pattern = r'<span class="popular" .*?><img .*?>([\s\S]*?)\n\s*<\/span>'

  def __fetch_content(self):
    """Download the live-list page and return it decoded as UTF-8 text."""
    r = request.urlopen(Spider.url)
    htmls = str(r.read(), encoding='utf-8')
    return htmls

  def __analysis(self, htmls):
    """Extract raw anchors from the page HTML.

    Returns a list of dicts whose values are the raw ``re.findall``
    match lists, e.g. {'name': [' Alice '], 'number': ['1.2万']}.
    """
    root_html = re.findall(Spider.root_pattern, htmls)
    anchors = []
    for html in root_html:
      name = re.findall(Spider.name_pattern, html)
      hot = re.findall(Spider.hot_pattern, html)
      anchors.append({'name': name, 'number': hot})
    return anchors

  # Refine: collapse each single-element match list into a stripped string.
  def __refine(self, anchors):
    l = lambda anchor: {'name': anchor['name'][0].strip(),
                        'number': anchor['number'][0].strip()}
    return map(l, anchors)

  # Sort by actual viewer count, descending.
  def __sort(self, anchors):
    # BUG FIX: previously keyed on float(x['number'][:-1]), which chopped the
    # last digit off plain counts ('900' -> 90.0) and ignored the 万 (x10000)
    # multiplier, so '1.2万' sorted below '500'. Use the numeric seed instead.
    return sorted(anchors, key=self.__sort_seed, reverse=True)

  # Convert a display string like '1.2万' or '900' into a comparable float.
  def __sort_seed(self, anchor):
    # BUG FIX: the old pattern '\d*' matched the empty string (ValueError on
    # float('')) and dropped the decimal part ('1.2' parsed as 1.0).
    matches = re.findall(r'\d+\.?\d*', anchor['number'])
    number = float(matches[0]) if matches else 0.0
    if '万' in anchor['number']:  # 万 means "ten thousand"
      number *= 10000
    return number

  # Print the ranked anchors, one per line: "<rank> <name>------<count>".
  def __show(self, anchors):
    for index, anchor in enumerate(anchors):
      print(str(index + 1) + " " + anchor['name'] + '------' + anchor['number'])

  def go(self):
    """Entry point: fetch, parse, refine, sort, and display the anchors."""
    htmls = self.__fetch_content()
    anchors = self.__analysis(htmls)
    anchors = list(self.__refine(anchors))
    anchors = self.__sort(anchors)
    self.__show(anchors)

# Guard the entry point so importing this module does not trigger network I/O.
if __name__ == '__main__':
  spider = Spider()
  spider.go()

