# -*- coding: utf-8 -*-
import scrapy

from function.items import FunctionItem

import re
class FunctionsSpider(scrapy.Spider):
    """Scrape C standard-library function reference pages from runoob.com.

    For every function name in ``all_function_name`` the spider requests
    ``https://www.runoob.com/cprogramming/c-function-<name>.html`` and
    yields a ``FunctionItem`` with the fields: name, the header the
    function belongs to, description, declaration, parameters, return
    value, and an example (source code plus expected output).
    """

    name = 'functions'
    allowed_domains = ['www.runoob.com']
    start_urls = ['http://www.runoob.com/']

    # C standard-library functions whose reference pages are scraped.
    # NOTE: the original file mixed tabs and spaces for indentation,
    # which is a TabError under Python 3; normalized to spaces here.
    # "setjmp" and "assert" were commented out in the original source —
    # presumably their pages deviate from the common template; confirm
    # before re-enabling.
    all_function_name = [
        "fclose", "vsprintf", "fscanf", "getc", "fgets", "fgetc",
        "sscanf", "fprintf", "perror", "sprintf", "vprintf", "fputs",
        "scanf", "fputc", "vfprintf", "tmpnam", "puts", "printf",
        "putc", "gets", "setvbuf", "rewind", "fwrite", "remove",
        "fsetpos", "fread", "fopen", "fflush", "feof", "ungetc",
        "putchar", "getchar", "setbuf", "tmpfile", "rename", "ftell",
        "fseek", "freopen", "fgetpos", "ferror", "clearerr", "atof",
        "mbstowcs", "rand", "mblen", "abs", "labs", "bsearch",
        "getenv", "srand", "wctomb", "mbtowc", "wcstombs", "div",
        "ldiv", "system", "qsort", "exit", "abort", "realloc",
        "malloc", "strtod", "atol", "strtok", "strxfrm", "strrchr",
        "strlen", "strerror", "strncmp", "strtoul", "calloc", "free",
        "atoi", "atexit", "strtol", "memchr", "strstr", "strcspn",
        "strspn", "strcmp", "strpbrk", "strncpy", "memcmp", "strcat",
        "memmove", "memcpy", "mktime", "clock", "memset", "strcoll",
        "strncat", "strchr", "strcpy", "difftime", "time", "asctime",
        "gmtime", "pow", "log", "ldexp", "fmod", "fabs", "sin",
        "atan2", "cosh", "floor", "strftime", "ctime", "localtime",
        "acos", "modf", "ceil", "log10", "exp", "frexp", "tanh",
        "sinh", "asin", "atan", "sqrt", "raise",
        # "setjmp",
        "localeconv", "isgraph", "isalpha", "cos", "signal",
        "longjmp", "setlocale", "isalnum", "isdigit", "ispunct",
        "isprint", "islower", "isxdigit", "tolower", "toupper",
        "isupper", "isspace", "iscntrl",
        # "assert",
    ]

    @staticmethod
    def _clean(text):
        """Normalize one extracted text fragment.

        Handles a missing node (``extract_first()`` returning None),
        strips surrounding whitespace, and doubles embedded double
        quotes (CSV-style escaping, matching the original behavior).
        """
        return (text or '').strip().replace('"', '""')

    def start_requests(self):
        """Yield one request per function reference page.

        The function name is carried in the request ``meta`` so that
        ``parse_item`` can record it without re-parsing the page title.
        """
        for func_name in self.all_function_name:
            yield scrapy.Request(
                url='https://www.runoob.com/cprogramming/c-function-{}.html'.format(func_name),
                meta={'name': func_name},
                callback=self.parse_item,
            )

    def parse_item(self, response):
        """Extract a ``FunctionItem`` from a single reference page.

        Fields: name, belong, description, declaration, parameters,
        returned, example.
        """
        item = FunctionItem()
        item['name'] = response.meta['name']

        # The <meta name="description"> content carries the header the
        # function belongs to, HTML-escaped as e.g. "&lt;stdio.h&gt;"
        # (the pattern's "&gt" matches the prefix of "&gt;").
        meta_desc = str(response.selector.xpath(
            '//meta[@name="description"]/@content').extract_first())
        headers = re.findall(r'&lt;(.*?)&gt', meta_desc)
        # Guard against layout changes instead of an unhandled IndexError.
        item['belong'] = headers[0] if headers else ''

        # Description: first paragraph after the first h2; the text up
        # to and including ")" is the declaration echo, so keep only
        # what follows it.
        desc_text = self._clean(response.selector.xpath(
            'string(//*[@id="content"]/h2[1]/following-sibling::p[1])').extract_first())
        desc_match = re.search(r'\)(.*)', desc_text)
        if desc_match:
            item['description'] = desc_match.group(1).strip()
        else:
            # Page deviates from the common template: report through
            # Scrapy's logging instead of a bare print().
            self.logger.warning('no description parsed for %s', item['name'])

        item['declaration'] = self._clean(response.selector.xpath(
            'string(//*[@id="content"]/h2[2]/following-sibling::pre[1])').extract_first())

        item['parameters'] = self._clean(response.selector.xpath(
            'string(//*[@id="content"]/ul)').extract_first())

        item['returned'] = self._clean(response.selector.xpath(
            'string(//*[@id="content"]/h2[4]/following-sibling::*[1])').extract_first())

        # Example = code listing (pre[2]) + fixed separator sentence +
        # expected program output (pre[3]); NBSPs normalized to spaces.
        example_code = self._clean(response.selector.xpath(
            'string(//*[@id="content"]/pre[2])').extract_first()).replace(u'\xa0', u' ')
        separator = '让我们编译并运行上面的程序,这将产生以下结果:'
        example_output = self._clean(response.selector.xpath(
            'string(//*[@id="content"]/pre[3])').extract_first()).replace(u'\xa0', u' ')
        item['example'] = example_code + '\n' + separator + '\n' + example_output

        # This callback is a generator; a trailing "return item" after
        # the yield (as in the original) is dead code and was removed.
        yield item
