#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re

from bs4 import BeautifulSoup
from scrapy.spiders import Spider, Request
from ..items import Content_Item_YJ, Label_Item_YJ_Temp, Base_Img_Item, Base_HTML_Content_Item
from datetime import date
from ..util import Strings
from .. import database as db

# Module-level DB cursor shared by the spider classes below; note that the
# class bodies execute queries on it at import time, not at crawl time.
cursor = db.connection.cursor()


class TaoGuBa_Cont_SH_Spider(Spider):
    """Spider for TaoGuBa forum thread pages (SH board).

    Start URLs come from the ``TB_URLS_sh`` table, fetched via the
    module-level ``cursor``. For each thread page ``parse`` yields:

    * one ``Base_Img_Item`` per embedded image (opening post and the
      author's replies),
    * one ``Label_Item_YJ_Temp`` per regex match (label post-processing
      happens in the pipeline),
    * one ``Content_Item_YJ`` per distinct stock code on the page,
    * one ``Base_HTML_Content_Item`` with the whole parsed text.
    """

    name = 'taoguba_sh'
    # NOTE(review): this query executes when the class is defined (i.e. at
    # module import), not when the crawl starts — kept as-is to preserve
    # the existing start-up behavior.
    sql = "select * from TB_URLS_sh "
    cursor.execute(sql)

    # URLs whose parsed item count deviates noticeably from the expected
    # number of text lines; reported when the spider closes.
    parse_less_than_know_urls = []
    label_sets = ()
    start_urls = [url['url'] for url in cursor.fetchall()]
    # start_urls.reverse()  # reverse once to optimise incremental updates

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0"}
    # Fixed session cookies required to access the forum pages.
    cookies = {
        'JSESSIONID': '341e262c-8ec7-4804-9adf-acdad4a31240',
        'UM_distinctid': '16ddd47cb3e38a-0b75361baa3dd9-b363e65-161012-16ddd47cb3f38c',
        'Hm_lvt_cc6a63a887a7d811c92b7cc41c441837': '1571375533', 'tgbuser': '3592025',
        'tgbpwd': 'E66BE235684br5mgcddtpr7zth', 'onedayyszc': '1571414400000',
        'CNZZDATA1574657': 'cnzz_eid%3D1674106506-1571373394-%26ntime%3D1571627532',
        'Hm_lpvt_cc6a63a887a7d811c92b7cc41c441837': '1571631277'
    }

    def write_2_file(self, content):
        """Dump *content* to the debug file ``a`` (overwritten each call).

        Uses a context manager so the handle is closed even if the write
        fails; the original left the file open on error. The parameter was
        renamed from ``str`` to stop shadowing the builtin.
        """
        with open('a', 'w', encoding='utf8') as file:
            file.write(content)

    def start_requests(self):
        """Issue one request per start URL, attaching the session cookies."""
        for url in self.start_urls:
            yield Request(url, callback=self.parse, cookies=self.cookies)

    def parse(self, response):
        """Parse one thread page; see the class docstring for yielded items."""
        print("spider:name:%s" % self.name)
        if 302 == response.status:
            print("redirect url %s" % response.url)
            return

        # NOTE(review): no explicit parser is passed, so bs4 picks the best
        # available one — unchanged to avoid altering parsing results.
        # (The original built this soup twice; the duplicate was removed.)
        soup = BeautifulSoup(response.text)
        pre_date = response.xpath('//*[@id="new_wrap_container"]/div[1]/div[2]/span[2]/text()').extract()[0]

        # De-duplicate per page by stock code, otherwise the same stock's
        # limit-up would be recorded more than once for the same day.
        content_item_dict = {}

        # Assemble the post date (first 10 chars, "YYYY-MM-DD").
        pre_date = pre_date[:10]
        cur_url_date = self.get_date_from_str(pre_date)

        # The thread title is used as the directory name for saved files.
        title = response.xpath('//*[@id="b_subject"]/text()').extract()[0]

        # Images in the opening post (markup differs slightly from the
        # rendered page, hence the src2 attribute).
        img_urls_soup = soup.select_one("div[id='first']").select(
            "img[onclick='opennewimg(this)']")
        for img_url_selector in img_urls_soup:
            img = Base_Img_Item()
            img['download_url'] = img_url_selector["src2"]
            img['rec_time'] = cur_url_date
            img['pre_dir'] = title + pre_date
            yield img

        head_content = soup.select_one("div[id='first']").text
        pat = r'(\s*[\u4E00-\u9FA5A-Za-z0-9_]*)\s*(\d{6})\s*([\u4E00-\u9FA5A-Za-z- ]{2,10})\s*((\d{1,2}:\d{1,2})|(\d*砸开\d*))\s*([\u4E00-\u9FA5A-Za-z\d\s\.+-]*)((##)|\s|\(|:)(.*?)(\s*\d{0,3}.?\d{0,3})\s*\n'

        pattern = re.compile(
            pattern=pat)
        # pattern="(([0-9]{6})\s*(\.[A-Z]{2})?\s*([\u4E00-\u9FA5A-Za-z0-9]{2,10}).*?\d{1,2}:\d{1,2}:\d{1,2}.*?([\u4E00-\u9FA5A-Za-z+])\s?[0-9]{1,10})")

        # The author's follow-up replies in the middle of the thread
        # (reply div ids are hard-coded to this thread's prefix).
        repley_contents_soup_all = soup.find_all(name='div', attrs={"id": re.compile(r"reply_563404_.*")})

        # Images embedded in those replies.
        for one_repley in repley_contents_soup_all:
            img_urls_soup = one_repley.select("img[onclick='opennewimg(this)']")
            for img_url_selector in img_urls_soup:
                print("type:%s" % type(img_url_selector))
                img = Base_Img_Item()
                img['download_url'] = img_url_selector["src2"]
                img['rec_time'] = cur_url_date
                img['pre_dir'] = title + pre_date
                yield img

        line_count = 0
        # Collect the text of the author's replies below the opening post.
        repley_contents = ''
        for res in repley_contents_soup_all:
            # Strip the user-tip anchor tags that pollute the reply text.
            p_select = res.find(name='p', attrs={'id': re.compile(r"reply\d*")})
            alt_select = p_select.find_all(name='a', attrs={'onmouseover': re.compile(r"userTips\(this,.*")})
            for alt in alt_select:
                alt.extract()
            # Split the reply on <br> tags: one logical line per <br>.
            for child in p_select.children:
                if child.name != 'br' and child.string:
                    repley_contents += child.string + " "
                else:
                    repley_contents += '\n'
                    line_count += 1

        # Concatenate opening post + replies and scrub abnormal characters.
        totle_repley_contents = head_content + repley_contents
        totle_repley_contents = Strings.format_str_spilt_2_quote(totle_repley_contents, "⎧⎫", '')  # pending verification

        for one_line_match in pattern.findall(string=totle_repley_contents):
            # Each match tuple holds several capture groups; see `pat` above.
            content_item = Content_Item_YJ()
            temp_label_s = Label_Item_YJ_Temp()
            temp_label_s['rec_time'] = cur_url_date

            content_item['full_time'] = cur_url_date
            content_item['code'] = self.strip_str_strict(one_line_match[1])
            content_item['name'] = self.strip_str_strict(one_line_match[2])
            content_item['full_res'] = self.format_str_2_quote(self.strip_str_strict(one_line_match[6]))
            content_item['full_detail'] = self.strip_str_strict(one_line_match[9])

            save_label_temp = None
            # full_res may be empty/None; only split when there is content.
            if content_item['full_res']:
                save_label_temp = content_item['full_res'].split(',')
            temp_label_s['labs'] = save_label_temp

            content_item_dict[content_item['code']] = content_item
            # Labels are post-processed in the pipeline; shape is e.g.
            # {rec_time: 2019-03-02, labs: ['5G', 'HUAWEI']}.
            yield temp_label_s

        parsed_item_num = len(content_item_dict)
        if abs(line_count - parsed_item_num) >= 5:
            # Parsed far fewer (or more) items than text lines — remember
            # the URL so `closed` can report suspicious pages.
            self.parse_less_than_know_urls.append(response.url)
            print("this page %s parsed items num is %s is less  than %s line" % (
                response.url, parsed_item_num, line_count))

        # One item per distinct stock code on this page.
        for content_item in content_item_dict.values():
            yield content_item

        # Persist the parsed text alongside the images.
        html_content = Base_HTML_Content_Item()
        html_content['parsed_content'] = totle_repley_contents
        html_content['pre_dir'] = title + pre_date
        yield html_content

    def closed(self, reason):
        """Report pages whose parsed item count looked wrong.

        BUG FIX: the original applied ``%`` to a string containing no
        conversion specifier, which raised ``TypeError: not all arguments
        converted during string formatting`` every time the spider closed.
        """
        print("已知的解析位置少了的元素:%s" % self.parse_less_than_know_urls)

    def get_date_from_str(self, date_s):
        """Build a ``datetime.date`` from a '2019-2-19'-style string.

        ``date.fromisoformat`` is not used because the source dates are not
        zero-padded.
        """
        dss = date_s.split("-")
        return date(int(dss[0]), int(dss[1]), int(dss[2]))

    def strip_str_strict(self, text):
        """Remove all spaces from *text*; return None for falsy input."""
        if text:
            return text.replace(" ", "")
        return None

    def format_str_2_quote(self, text):
        """Normalise label separators: '+' and '、' both become ','.

        The original chained a second ``.replace("+", "m")`` which could
        never match (every '+' was already replaced by the first call) —
        removed as dead code; behavior is unchanged.
        """
        if text:
            return text.replace("+", ",").replace("、", ",").strip()
        # .replace("-",",")\
        return text
