# +--------------------------
# | User: zq                -
# | Version: python3.7      -
# | Time: 2020-03-10 20:20                
# +--------------------------
"""
思路:
1. 解析得到所有的url
2. 解析所有的url 列表和文章页,获取数据
"""
import re
from urllib import parse

import requests
from scrapy import Selector
from datetime import datetime

from meetlove.models import *

domain = "http://www.meetlove520.com/"


def parse_article(base_url):
    """Fetch a single article page and upsert it into the ``Article`` table.

    :param base_url: absolute URL of the article page; must contain an
        ``&Id=<digits>&`` query fragment identifying the article.
    :raises AttributeError: if the URL has no ``&Id=<digits>&`` fragment
        (``re.search`` returns ``None``).
    """
    # timeout prevents the crawl from hanging forever on a dead connection.
    res_text = requests.get(base_url, headers={'Connection': 'close'}, timeout=30).text
    sel = Selector(text=res_text)
    # Raw string avoids the invalid-escape-sequence warning for \d.
    arc_id = re.search(r'&Id=(\d+)&', base_url).group(1)

    create_time = sel.xpath("//span[@id='xn_c_solution_view_1_Text3']/text()").extract()[0]
    create_time = datetime.strptime(create_time, '%Y-%m-%d')
    yuedu_nums = sel.xpath("//span[@id='xn_c_solution_view_1_Text5']/text()").extract()[0]
    content = sel.xpath("//span[@id='xn_c_casev_1_Text3']").extract()[0]

    arc = Article()
    arc.arc_id = arc_id
    arc.create_time = create_time
    arc.yuedu_nums = int(yuedu_nums)
    arc.content = content

    # Upsert: UPDATE when the row already exists, otherwise force an INSERT
    # (peewee treats a pre-set primary key as an UPDATE target by default).
    # select() is a class-level query — no throwaway instance needed.
    existed_list = Article.select().where(Article.arc_id == arc_id)
    if existed_list:
        arc.save()
    else:
        arc.save(force_insert=True)


def get_urls(base_url):
    """Crawl one listing page: save every list item into the ``List`` table,
    parse each item's article detail page, then recurse into the next page.

    :param base_url: absolute URL of the listing page.
    """
    # timeout prevents the crawl from hanging forever on a dead connection.
    res_text = requests.get(base_url, headers={'Connection': 'close'}, timeout=30).text
    sel = Selector(text=res_text)

    all_items = sel.xpath("//div[@id='xn_c_solution_30_wrap']/div")

    for item in all_items:
        title = item.xpath(".//span[@class='xn_c_solution_30_title1']/a/text()").extract()[0].strip()
        url = item.xpath(".//span[@class='xn_c_solution_30_title1']/a/@href").extract()[0]
        url = parse.urljoin(domain, url)
        picname = item.xpath(".//div[@class='xn_c_solution_30_pic']/a[1]/img[1]/@data-original").extract()[0]
        picname = parse.urljoin(domain, picname)
        desc = item.xpath(".//div[@class='xn_c_solution_30_nr3']/span[1]/text()").extract()[0]

        # Raw string avoids the invalid-escape-sequence warning for \d;
        # item_id / record avoid shadowing the builtins ``id`` and ``list``.
        item_id = re.search(r'&Id=(\d+)&', url).group(1)

        record = List()
        record.id = item_id
        record.title = title
        record.picname = picname
        record.desc = desc

        # Both branches printed the same message — hoist it out of the if/else.
        print('---正在保存---' + title + '---正在保存---')
        # Upsert: UPDATE when the row exists, otherwise force an INSERT.
        existed_list = List.select().where(List.id == item_id)
        if existed_list:
            record.save()
        else:
            record.save(force_insert=True)

        # Parse the article detail page belonging to this list item.
        parse_article(url)

    # Follow the "next page" link recursively until there is none.
    next_url = sel.xpath("//span[@class='fy_6']/a/@href").extract()
    if next_url:
        get_urls(parse.urljoin(domain, next_url[0]))


if __name__ == "__main__":
    urls = ['http://www.meetlove520.com/solution.aspx?TypeId=63&fid=t25:63:25',
            'http://www.meetlove520.com/solution.aspx?Id=167&fid=t25:167:25',
            'http://www.meetlove520.com/solution.aspx?TypeId=175&fid=t25:175:25']
    for url in urls:
        get_urls(url)
