import csv
from urllib.parse import urlparse, urljoin
import scrapy
from scrapy.spiders import Spider
from scrapy.crawler import CrawlerProcess
import signal
from ..settings import MY_USER_AGENT
from twisted.internet import reactor
from twisted.internet.error import DNSLookupError, TCPTimedOutError, TimeoutError
import subprocess
import requests

import re


"""
'''

def clean_text(text):
    #清理文本，保留小于等于20个字符的纯文本内容
    #text = re.sub(r'<[^>]+>', '', text)
    # segments = text.split('!')
    segments = text.split('\t')
    clean_segments = [
        seg.strip() for seg in segments
        # if len(seg.strip()) <= 20 and seg.strip() and seg.strip() != '\n'
        if seg.strip() and seg.strip() != '\n'

    ]
    return clean_segments


class DangSpider(Spider):
    name = "dang"

    custom_settings = {
        'USER_AGENT': 'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.1)',
        'LOG_LEVEL': 'INFO',
        'CONCURRENT_REQUESTS': 8,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
        'RETRY_TIMES': 3,  # 不重试
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': None,
        },
    }

    def __init__(self, depth=2, *args, **kwargs):
        super(DangSpider, self).__init__(*args, **kwargs)
        self.depth = depth
        self.start_urls = []
        self.allowed_domains = set()
        self.company_urls = []
        self.load_urls_and_domains()
        self.load_blacklist()
        self.load_whitelist()
        self.load_target_keys()

        # 设置信号处理函数
        signal.signal(signal.SIGINT, self.handle_sigint)

    def load_urls_and_domains(self):
        with open('D:/Courses/SmallThree/scrapy_spider/warehouse/url1.csv', 'r', encoding='utf-8') as f:
            reader = csv.DictReader(f)
            for row in reader:
                company_name = row['name']
                url = row['url']
                if not urlparse(url).scheme:
                    url = 'http://' + url
                self.company_urls.append((company_name, url))
                domain = urlparse(url).netloc
                self.allowed_domains.add(domain)
        self.allowed_domains = list(self.allowed_domains)

    def load_blacklist(self):
        with open('D:/Courses/SmallThree/scrapy_spider/warehouse/black.txt', 'r', encoding='utf-8') as f:
            self.blacklist = [line.strip() for line in f]

    def load_whitelist(self):
        with open('D:/Courses/SmallThree/scrapy_spider/warehouse/white.txt', 'r', encoding='utf-8') as f:
            self.whitelist = [line.strip() for line in f]

    def load_target_keys(self):
        with open('D:/Courses/SmallThree/scrapy_spider/warehouse/key.txt', 'r', encoding='utf-8') as f:
            self.target_keys = [line.strip() for line in f]

    def start_requests(self):
        for company_name, url in self.company_urls:
            yield scrapy.Request(url=url, callback=self.parse_initial, errback=self.errorback,
                                 meta={'company_name': company_name})

    def parse_initial(self, response):
        company_name = response.meta['company_name']
        text_content = self.extract_text(response)

        with open('D:/Courses/SmallThree/scrapy_spider/first_layer_links1.csv', 'a', newline='', encoding='utf-8') as f:
            fieldnames = ['company_name', 'url', 'text_content']
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writerow({'company_name': company_name, 'url': response.url, 'text_content': text_content})

        links = response.css('a::attr(href)').extract()
        if self.depth > 1:
            for link in links:
                full_url = urljoin(response.url, link)
                if self.should_follow_link(full_url):
                    yield scrapy.Request(url=full_url, callback=self.parse_second_layer, errback=self.errorback,
                                         meta={'company_name': company_name})
        #divs = response.css('div').extract()
        #clear_divs = clean_text(divs)
        #print(divs)
    def parse_second_layer(self, response):
        company_name = response.meta['company_name']
        text_content = self.extract_text(response)

        with open('D:/Courses/SmallThree/scrapy_spider/second_layer_links1.csv', 'a', newline='', encoding='utf-8') as f:
            fieldnames = ['company_name', 'url', 'text_content']
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writerow({'company_name': company_name, 'url': response.url, 'text_content': text_content})

        links = response.css('a::attr(href)').extract()
        if self.depth > 2:
            for link in links:
                full_url = urljoin(response.url, link)
                if self.should_follow_link(full_url):
                    yield scrapy.Request(url=full_url, callback=self.parse_third_layer, errback=self.errorback,
                                         meta={'company_name': company_name})

    def parse_third_layer(self, response):
        company_name = response.meta['company_name']
        text_content = self.extract_text(response)

        with open('D:/Courses/SmallThree/scrapy_spider/third_layer_links1.csv', 'a', newline='', encoding='utf-8') as f:
            fieldnames = ['company_name', 'url', 'text_content']
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writerow({'company_name': company_name, 'url': response.url, 'text_content': text_content})

    def extract_text(self, response):
        #clean_body = response.xpath('//div[not(ancestor::script) and not(ancestor::style)]').getall()
        clean_body = response.xpath('//body//text()[not(ancestor::script) and not(ancestor::style)]').getall()
        text_content = ' '.join(clean_body).strip()
        return text_content

    def should_follow_link(self, url):
        #判断是否应该跟随链接
        for term in self.blacklist:
            if term in url:
                return False
        for term in self.whitelist:
            if term in url:
                return True
        for term in self.target_keys:
            if term in url:
                return True
        return False




class DangSpider(Spider):
    name = "dang"

    custom_settings = {
        'USER_AGENT': MY_USER_AGENT,
            #'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.1) Microsoft Edge/126.0.2592.81',
            #


        'LOG_LEVEL': 'INFO',
        'CONCURRENT_REQUESTS': 8,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
        'RETRY_TIMES': 3,  # 不重试
        'DOWNLOAD_DELAY': 3,  # 下裁延迟固定值（秒）
        #'REQUEST_FINGERPRINTER_IMPLEMENTATION': 2.7,
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': None,
        },
    }

    def __init__(self, depth=2, *args, **kwargs):
        super(DangSpider, self).__init__(*args, **kwargs)
        self.depth = depth
        self.start_urls = []
        self.allowed_domains = set()
        self.company_urls = []
        self.load_urls_and_domains()
        self.load_blacklist()
        self.load_whitelist()
        self.load_target_keys()

        # 设置信号处理函数
        signal.signal(signal.SIGINT, self.handle_sigint)

    def load_urls_and_domains(self):
        with open('D:/Courses/SmallThree/scrapy_spider/warehouse/url1.csv', 'r', encoding='utf-8') as f:
            reader = csv.DictReader(f)
            for row in reader:
                company_name = row['name']
                url = row['url']
                if not urlparse(url).scheme:
                    url = 'https://' + str(url)
                self.company_urls.append((company_name, url))
                domain = urlparse(url).netloc
                self.allowed_domains.add(domain)
        self.allowed_domains = list(self.allowed_domains)

    def load_blacklist(self):
        with open('D:/Courses/SmallThree/scrapy_spider/warehouse/black.txt', 'r', encoding='utf-8') as f:
            self.blacklist = [line.strip() for line in f]

    def load_whitelist(self):
        with open('D:/Courses/SmallThree/scrapy_spider/warehouse/white.txt', 'r', encoding='utf-8') as f:
            self.whitelist = [line.strip() for line in f]

    def load_target_keys(self):
        with open('D:/Courses/SmallThree/scrapy_spider/warehouse/key.txt', 'r', encoding='utf-8') as f:
            self.target_keys = [line.strip() for line in f]

    def start_requests(self):
        #headers = {
            #'User-Agent': 'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.1) Microsoft Edge/126.0.2592.81'}
        #division = "baike.baidu"

        for company_name, url in self.company_urls:
            yield scrapy.Request(url=url, callback=self.parse_initial, errback=self.errorback, meta={'company_name': company_name})

            '''
            if "baike.baidu" in url:
                # 创建Session对象。
                session_obj = requests.Session()
                # 使用Session对象向首页发送请求。
                # 服务端产生Cookie，自动将Cookie保存于Session对象中。
                session_obj.get(url=url, headers=headers)
                # 使用Session对象再次发送请求，此次请求中自动携带Cookie。
                response_obj = session_obj.get(url=url, headers=headers)
                yield scrapy.Request(url=url, callback=self.parse_initial, errback=self.errorback,
                                     meta={'company_name': company_name})
            else:
                yield scrapy.Request(url=url, callback=self.parse_initial, errback=self.errorback,
                                     meta={'company_name': company_name})

            
            r = requests.get(url, headers= headers, allow_redirects=False)
            if r.status_code == 200:
                yield scrapy.Request(url=url, callback=self.parse_initial, errback=self.errorback, meta={'company_name': company_name})
            else:
                real_url = r.headers['Location']
                url = url + real_url
                yield scrapy.Request(url=url, callback=self.parse_initial, errback=self.errorback, meta={'company_name': company_name})
            '''



    def parse_initial(self, response):
        company_name = response.meta['company_name']
        text_content = self.extract_text(response)

        with open('D:/Courses/SmallThree/scrapy_spider/first_layer_links1.csv', 'a', newline='', encoding='utf-8') as f:
            fieldnames = ['company_name', 'url', 'text_content', 'text_connect']
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writerow({'company_name': company_name, 'url': response.url, 'text_content': text_content})

        links = response.css('a::attr(href)').extract()
        if self.depth > 1:
            for link in links:
                full_url = urljoin(response.url, link)
                if self.should_follow_link(full_url):
                    yield scrapy.Request(url=full_url, callback=self.parse_second_layer, errback=self.errorback,
                                         meta={'company_name': company_name})

    def parse_second_layer(self, response):
        company_name = response.meta['company_name']
        text_content = self.extract_text(response)

        with open('D:/Courses/SmallThree/scrapy_spider/second_layer_links1.csv', 'a', newline='', encoding='utf-8') as f:
            fieldnames = ['company_name', 'url', 'text_content', 'text_connect']
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writerow({'company_name': company_name, 'url': response.url, 'text_content': text_content})

        links = response.css('a::attr(href)').extract()
        if self.depth > 2:
            for link in links:
                full_url = urljoin(response.url, link)
                if self.should_follow_link(full_url):
                    yield scrapy.Request(url=full_url, callback=self.parse_third_layer, errback=self.errorback, meta={'company_name': company_name})

    def parse_third_layer(self, response):
        company_name = response.meta['company_name']
        text_content = self.extract_text(response)

        with open('D:/Courses/SmallThree/scrapy_spider/third_layer_links1.csv', 'a', newline='', encoding='utf-8') as f:
            fieldnames = ['company_name', 'url', 'text_content', 'text_connect']
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writerow({'company_name': company_name, 'url': response.url, 'text_content': text_content})

    def extract_text(self, response):
        clean_body = response.xpath('//body//text()[not(ancestor::script) and not(ancestor::style)]').getall()
        text_content = ' '.join(clean_body).strip()
        return text_content

    def should_follow_link(self, url):
        #判断是否应该跟随链接
        for term in self.blacklist:
            if term in url:
                return False
        for term in self.whitelist:
            if term in url:
                return True
        for term in self.target_keys:
            if term in url:
                return True
        return False

    def handle_sigint(self, signum, frame):
        #处理中断信号
        # reactor.stop()  # 停止 Reactor
        pass

    def errorback(self, failure):
        self.logger.error(f"Error on request {failure.request.url}: {failure.value}. Skipping request.")
        return
    
    """
class DangSpider(Spider):
    """Crawl company websites layer by layer and append extracted page text to CSVs.

    Start URLs and filter lists (blacklist / whitelist / target keywords) are
    read from the ``warehouse`` directory; one output CSV is appended per crawl
    layer.  Retry and redirect middlewares are disabled in ``custom_settings``.
    """

    name = "dang"

    # NOTE(review): hard-coded absolute Windows paths — consider turning these
    # into spider arguments or settings.
    _WAREHOUSE = 'D:/Courses/SmallThree/scrapy_spider/warehouse'
    _OUTPUT = 'D:/Courses/SmallThree/scrapy_spider'
    _FIELDNAMES = ['company_name', 'url', 'text_content']

    custom_settings = {
        'USER_AGENT': 'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.1)',
        'LOG_LEVEL': 'INFO',
        'CONCURRENT_REQUESTS': 8,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
        # RetryMiddleware is disabled below, so RETRY_TIMES currently has no
        # effect; it is kept so retries can be re-enabled in one step.
        'RETRY_TIMES': 3,
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': None,
        },
    }

    def __init__(self, depth=3, *args, **kwargs):
        """Initialize the spider and load all input files.

        :param depth: how many link layers to follow.  Scrapy passes ``-a``
            arguments as strings, so the value is coerced to ``int`` (the
            original stored the raw string, making ``self.depth > 1`` raise
            ``TypeError`` on Python 3).
        """
        super(DangSpider, self).__init__(*args, **kwargs)
        self.target_keys = None
        self.whitelist = None
        self.blacklist = None
        self.depth = int(depth)  # bug fix: "-a depth=3" arrives as a str
        self.start_urls = []
        self.allowed_domains = set()
        self.company_urls = []
        self.load_urls_and_domains()
        self.load_blacklist()
        self.load_whitelist()
        self.load_target_keys()

        # Install the SIGINT handler (currently a no-op placeholder).
        signal.signal(signal.SIGINT, self.handle_sigint)

    @staticmethod
    def _read_lines(path):
        """Return the stripped, non-empty lines of a UTF-8 text file.

        Empty lines are dropped: an empty string matches ``'' in url`` for
        every URL, so a single blank line in black.txt would have blocked
        every link (and in white.txt allowed every link).
        """
        with open(path, 'r', encoding='utf-8') as f:
            return [line.strip() for line in f if line.strip()]

    def load_urls_and_domains(self):
        """Populate ``company_urls`` and ``allowed_domains`` from the URL CSV.

        The CSV must have ``name`` and ``url`` columns; scheme-less URLs are
        normalized to https.
        """
        with open(self._WAREHOUSE + '/url1.csv', 'r', encoding='utf-8') as f:
            for row in csv.DictReader(f):
                company_name = row['name']
                url = row['url']
                if not urlparse(url).scheme:
                    url = 'https://' + str(url)
                self.company_urls.append((company_name, url))
                self.allowed_domains.add(urlparse(url).netloc)
        self.allowed_domains = list(self.allowed_domains)

    def load_blacklist(self):
        """Load URL substrings that must never be followed."""
        self.blacklist = self._read_lines(self._WAREHOUSE + '/black.txt')

    def load_whitelist(self):
        """Load URL substrings that are always followed (unless blacklisted)."""
        self.whitelist = self._read_lines(self._WAREHOUSE + '/white.txt')

    def load_target_keys(self):
        """Load keyword substrings that qualify a URL for following."""
        self.target_keys = self._read_lines(self._WAREHOUSE + '/key.txt')

    def start_requests(self):
        """Yield the first-layer request for every company URL."""
        for company_name, url in self.company_urls:
            yield scrapy.Request(url=url, callback=self.parse_initial,
                                 errback=self.errorback,
                                 meta={'company_name': company_name})

    def _append_row(self, filename, company_name, url, text_content):
        """Append one record to a per-layer CSV under the output directory.

        Note: no header row is written; downstream consumers see raw rows.
        """
        with open(self._OUTPUT + '/' + filename, 'a', newline='',
                  encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=self._FIELDNAMES)
            writer.writerow({'company_name': company_name, 'url': url,
                             'text_content': text_content})

    def clean_text_with_blacklist(self, text, blacklist):
        """Remove spans starting at a blacklisted term and running through the
        next sentence terminator (., ? or !), case-insensitively.

        Returns ``text`` unchanged for an empty blacklist: the original built
        the degenerate pattern ``\\b()\\b.*[.?!]`` in that case, which matched
        (and deleted) text everywhere.
        """
        if not blacklist:
            return text
        pattern = (r'\b(' + '|'.join(re.escape(term) for term in blacklist)
                   + r')\b.*[.?!]')
        return re.sub(pattern, '', text, flags=re.IGNORECASE | re.DOTALL)

    def clean_text_with_whitelist(self, text, whitelist):
        """Return the sentences of ``text`` containing a whitelisted word.

        Sentences are split on runs of ., ! and ?; word matching is
        case-insensitive and whole-word (``\\b\\w+\\b``).
        """
        sentences = re.split(r'[.!?]+', text)
        lowered = [w.lower() for w in whitelist]
        return [
            sentence.strip() for sentence in sentences
            if any(word.lower() in lowered
                   for word in re.findall(r'\b\w+\b', sentence))
        ]

    def parse_initial(self, response):
        """First layer: record the cleaned page text, then follow qualifying links."""
        company_name = response.meta['company_name']
        text_content = self.extract_text(response)
        cleaned = self.clean_text_with_blacklist(text_content, self.blacklist)
        self._append_row('first_layer_links1.csv', company_name, response.url,
                         cleaned)

        if self.depth > 1:
            for link in response.css('a::attr(href)').extract():
                full_url = urljoin(response.url, link)
                if self.should_follow_link(full_url):
                    yield scrapy.Request(url=full_url,
                                         callback=self.parse_second_layer,
                                         errback=self.errorback,
                                         meta={'company_name': company_name})

    def parse_second_layer(self, response):
        """Second layer: record the cleaned page text.

        Bug fix: the original computed the blacklist-cleaned text but wrote the
        raw text to the CSV.  Third-layer link following was commented out in
        the original and remains disabled here.
        """
        self.logger.info("Running parse_second_layer for URL: %s", response.url)
        company_name = response.meta['company_name']
        text_content = self.extract_text(response)
        cleaned = self.clean_text_with_blacklist(text_content, self.blacklist)
        self._append_row('second_layer_links1.csv', company_name, response.url,
                         cleaned)
        # TODO(review): re-enable third-layer crawling (self.depth > 2 ->
        # parse_third_layer) once the second layer is validated.

    def parse_third_layer(self, response):
        """Third layer: record the raw page text (no blacklist cleaning,
        matching the original behavior). Currently unreachable while
        parse_second_layer does not follow links."""
        self.logger.info("Running parse_third_layer for URL: %s", response.url)
        company_name = response.meta['company_name']
        text_content = self.extract_text(response)
        self._append_row('third_layer_links1.csv', company_name, response.url,
                         text_content)

    def extract_text(self, response):
        """Return all visible body text, excluding <script>/<style> content.

        Note: the original class defined extract_text twice (the first version
        returned response.text); only this later definition was ever bound, so
        removing the dead duplicate preserves behavior.
        """
        clean_body = response.xpath(
            '//body//text()[not(ancestor::script) and not(ancestor::style)]'
        ).getall()
        return ' '.join(clean_body).strip()

    def should_follow_link(self, url):
        """Decide whether a link should be followed.

        Blacklist wins over everything; otherwise a whitelist or target-key
        substring match allows the link; anything else is skipped.
        """
        for term in self.blacklist:
            if term in url:
                return False
        for term in self.whitelist:
            if term in url:
                return True
        for term in self.target_keys:
            if term in url:
                return True
        return False

    def handle_sigint(self, signum, frame):
        """SIGINT handler — intentionally a no-op placeholder.

        (A previous iteration stopped the Twisted reactor here.)
        """
        pass

    def errorback(self, failure):
        """Log a failed request (DNS/timeout/etc.) and skip it."""
        self.logger.error(
            f"Error on request {failure.request.url}: {failure.value}. Skipping request.")
        return