#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import random
import string
import json

from bs4 import BeautifulSoup
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Spider, Request, Rule
from ..items import UrlItem, Content_Item_YJ, Label_Item_YJ_Temp, Base_Img_Item
from datetime import date

from .. import database as db

# Module-level DB cursor opened at import time from the project-wide
# connection (see ..database).  The spider class below executes a query
# against it in its class body, so the DB must be reachable on import.
cursor = db.connection.cursor()


class TaoGuBa_Cont_YJ_Spider(Spider):
    """Spider for taoguba.com.cn post pages (YJ series).

    For each post page it yields:
      * ``Base_Img_Item`` for every embedded image in the first-floor post
        and in the original poster's replies,
      * one ``Label_Item_YJ_Temp`` per matched line (raw labels; aggregation
        happens in the pipeline),
      * de-duplicated ``Content_Item_YJ`` stock records.

    Start URLs are loaded from the ``tb_urls_yj`` table at class-definition
    time via the module-level cursor, so the DB must be reachable on import.
    """
    name = 'taoguba_yj'
    sql = "select * from tb_urls_yj"
    cursor.execute(sql)

    label_sets = ()
    # NOTE(review): assumes each fetched row is dict-like with a 'url' key
    # (e.g. a DictCursor) — confirm against the database module.
    start_urls = [url['url'] for url in cursor.fetchall()]
    start_urls.reverse()  # reverse so updates are picked up in a better order
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0"}
    # Fixed session cookies (the site requires a logged-in session).
    cookies = {
        'JSESSIONID': '341e262c-8ec7-4804-9adf-acdad4a31240',
        'UM_distinctid': '16ddd47cb3e38a-0b75361baa3dd9-b363e65-161012-16ddd47cb3f38c',
        'Hm_lvt_cc6a63a887a7d811c92b7cc41c441837': '1571375533', 'tgbuser': '3592025',
        'tgbpwd': 'E66BE235684br5mgcddtpr7zth', 'onedayyszc': '1571414400000',
        'CNZZDATA1574657': 'cnzz_eid%3D1674106506-1571373394-%26ntime%3D1571627532',
        'Hm_lpvt_cc6a63a887a7d811c92b7cc41c441837': '1571631277'
    }

    def start_requests(self):
        """Issue one request per start URL, attaching the session cookies."""
        for url in self.start_urls:
            yield Request(url, callback=self.parse, cookies=self.cookies)

    def parse(self, response):
        """Parse a single post page and yield image/label/content items.

        A 302 response means we were redirected (session expired / login
        wall); the URL is printed and the page skipped.
        """
        if response.status == 302:
            print(response.url)
            return

        soup = BeautifulSoup(response.text)

        # Post date, e.g. '2019-02-19 ...': keep only the date part and
        # convert it to a datetime.date.
        pre_date = response.xpath('//*[@id="new_wrap_container"]/div[1]/div[2]/span[2]/text()').extract()[0]
        pre_date = pre_date[:10]
        cur_url_date = self.get_date_from_str(pre_date)

        # Post title, used as the sub-directory name for downloaded images.
        title = response.xpath('//*[@id="b_subject"]/text()').extract()[0]

        # De-duplicate within one page, otherwise the same stock's limit-up
        # for the same day would be recorded several times.
        content_item_dict = {}

        # --- first-floor post: images ---
        # (real image URL lives in the 'src2' attribute, which differs from
        # what a browser shows)
        head_content_selector = soup.select_one("div[id='first']")
        if head_content_selector:
            for img_tag in head_content_selector.select("img[onclick='opennewimg(this)']"):
                img = Base_Img_Item()
                img['download_url'] = img_tag["src2"]
                img['rec_time'] = cur_url_date
                img['pre_dir'] = title + pre_date
                yield img

        # --- first-floor post: text (image tags implicitly skipped) ---
        head_content = ''
        if head_content_selector:
            for child in head_content_selector.children:
                if child.name == 'br':
                    head_content += '\n'
                elif child.string:
                    head_content += child.string

        # One record per line: 6-digit code, optional exchange suffix,
        # stock name, a time stamp, optional label text, optional trailing
        # count — terminated by a newline.
        pat = r'([0-9]{6})\s*(\.\*?[A-Z]{2}\*?)?\s*([\u4E00-\u9FA5A-Za-z]{2,10}).*?\d{1,2}:\d{1,2}(:\d{1,2})?\s?([\u4E00-\u9FA5A-Za-z+0-9\s%.]{0,60}[\u4E00-\u9FA5A-Za-z+\s%.])?(\s*\d{0,2})\n'
        pattern = re.compile(pattern=pat)

        # --- replies by the original poster (div ids reply_511605_*) ---
        repley_contents_soup_all = soup.find_all(name='div', attrs={"id": re.compile("reply_511605_.*")})

        # Images embedded in the replies.
        for one_repley in repley_contents_soup_all:
            for img_tag in one_repley.select("img[onclick='opennewimg(this)']"):
                img = Base_Img_Item()
                img['download_url'] = img_tag["src2"]
                img['rec_time'] = cur_url_date
                img['pre_dir'] = title + pre_date
                yield img

        # Reply text.  BUGFIX: the original iterated over the (empty) string
        # 'repley_contents' instead of the reply elements' children, so reply
        # text was always lost.
        repley_contents = ''
        for one_repley in repley_contents_soup_all:
            for child in one_repley.children:
                if child.name == 'br':
                    repley_contents += '\n'
                elif child.string:
                    repley_contents += child.string

        # Assemble and normalize the full text.
        totle_repley_contents = head_content + repley_contents
        totle_repley_contents = totle_repley_contents.replace("⎧", "").replace("⎫", "")
        totle_repley_contents = totle_repley_contents.replace("\r", "\n")

        # Fallback: if too few records matched, retry against the raw element
        # text of the replies and the first floor (some pages render
        # differently from what the child-walk above captures).
        if len(pattern.findall(totle_repley_contents)) < 6:
            for one_repley in repley_contents_soup_all:
                totle_repley_contents += one_repley.text
            if head_content_selector:
                totle_repley_contents += head_content_selector.text
        totle_repley_contents = totle_repley_contents.replace("\r", "\n")

        for one_line_match in pattern.findall(string=totle_repley_contents):
            content_item = Content_Item_YJ()
            temp_label_s = Label_Item_YJ_Temp()

            content_item['full_time'] = cur_url_date
            temp_label_s['rec_time'] = cur_url_date
            content_item['code'] = one_line_match[0]
            content_item['name'] = one_line_match[2]
            content_item['full_res'] = self.format_str_2_quote(one_line_match[4])

            # Split 'A,B,C' into a label list; stays None when there is no
            # label text for this line.
            save_label_temp = None
            if content_item['full_res']:
                save_label_temp = content_item['full_res'].split(',')
            temp_label_s['labs'] = save_label_temp

            content_item_dict[content_item['code']] = content_item
            # Yield the raw per-page labels; aggregation happens in the
            # pipeline, e.g. {rec_time: 2019-03-02, labs: ['5G', 'HUAWEI']}.
            yield temp_label_s

        print("page:%s content is %s has %s items are : %s" % (
            response.url, totle_repley_contents, len(content_item_dict), content_item_dict.values()))

        # Emit this page's de-duplicated stock items.
        for content_item in content_item_dict.values():
            yield content_item

    def closed(self, reason):
        """Scrapy shutdown hook; nothing to clean up."""
        pass

    def get_date_from_str(self, date_s):
        """Convert a 'YYYY-M-D' string (e.g. '2019-2-19') to datetime.date."""
        dss = date_s.split("-")
        return date(int(dss[0]), int(dss[1]), int(dss[2]))

    def format_str_2_quote(self, text):
        """Normalize a raw label string into a comma-separated list.

        '+' and the Chinese enumeration comma '、' become ',', and all
        spaces are removed.  Falsy input is returned unchanged.
        """
        if text:
            return text.replace("+", ",").replace("、", ",").replace(" ", "").strip()
        return text
