/*
 * @Version: 0.0.1
 * @Author: ider
 * @Date: 2020-10-30 15:47:19
 * @LastEditors: ider
 * @LastEditTime: 2020-11-05 10:02:39
 * @Description: 对所有文章计算颠覆度（disruption index）
 */
package disruption

import (
	"bufio"
	"compress/gzip"
	"encoding/csv"
	"encoding/json"
	"io"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/emirpasic/gods/sets/hashset"
	log "github.com/sirupsen/logrus"
)

// NewChanObj is the message passed from the line-parsing workers to the
// single aggregation goroutine in ReadAllLinksChanRef — one per input line.
type NewChanObj struct {
	Key string    // paper id (first tab-separated field of the line)
	S   *[]string // outgoing link ids (decoded from the JSON array in field 3)
	AC  int       // author count (length of the JSON array in field 4)
}

func init() {
	// Route all logrus output to stdout.
	log.SetOutput(os.Stdout)
	// Original note said "set log level to warn and above", but no
	// log.SetLevel call is present — presumably a leftover TODO; confirm
	// whether the default (Info) level is intended.
}

/**
 * @description: Load the whole citation graph into memory (space-for-time
 * trade-off): for every paper build its outgoing-link set (linksout), its
 * incoming-link set (linksin) and its author count.
 *
 * Input file, one record per line, tab-separated:
 *   paperId \t <field1> \t <JSON array of linked-out ids> \t <JSON array of authors>
 *
 * The three maps are allocated before the file is opened, so on an open
 * error the (empty, non-nil) maps are still returned and callers can
 * dereference them safely.
 */
func ReadAllLinksChanRef() (linksoutRef, linksinRef *map[string]*hashset.Set, autherCountMap *map[string]int) {
	linksoutRef = new(map[string]*hashset.Set)
	*linksoutRef = make(map[string]*hashset.Set)
	linksinRef = new(map[string]*hashset.Set)
	*linksinRef = make(map[string]*hashset.Set)
	autherCountMap = new(map[string]int)
	*autherCountMap = make(map[string]int)

	fi, err := os.Open("/home/ni/data/mag/all_refed_authors_data.txt")
	if err != nil {
		log.Printf("Error: %s\n", err)
		return
	}
	defer fi.Close()
	startTime := time.Now()
	br := bufio.NewReader(fi)

	ch := make(chan string, 1000)
	chOut := make(chan NewChanObj, 1000)
	const threadCount = 20
	var wg = sync.WaitGroup{}
	var wgOut = sync.WaitGroup{}
	wg.Add(threadCount)
	wgOut.Add(1)
	// Parsing workers: split each line on tabs and decode the two JSON fields.
	for i := 0; i < threadCount; i++ {
		go func() {
			// BUG FIX: the old workers could `return` on a JSON error before
			// calling wg.Done(), deadlocking wg.Wait() below forever.
			defer wg.Done()
			for a := range ch {
				strs := strings.Split(a, "\t")
				linksoutArray := []string{}
				var autherArray = []interface{}{}
				// BUG FIX: strs[3] is indexed below, so four fields are
				// required; the old `>= 3` guard allowed an out-of-range
				// panic on exactly-3-field lines.
				if len(strs) >= 4 {
					if err := json.Unmarshal([]byte(strs[2]), &linksoutArray); err != nil {
						log.Printf("Error: %s\n", err)
						log.Printf("%+v\n", a)
						continue // skip the malformed line, keep the worker alive
					}
					if err := json.Unmarshal([]byte(strs[3]), &autherArray); err != nil {
						log.Printf("Error: %s\n", err)
						log.Printf("%+v\n", a)
						continue
					}
				} else {
					log.Println("strs", strs)
				}
				// Deliberately not filtering on len(linksoutArray) > 0.
				chOut <- NewChanObj{Key: strs[0], S: &linksoutArray, AC: len(autherArray)}
			}
		}()
	}
	// Single aggregator goroutine: builds linksout/linksin and the author
	// count map. Being the only writer, no locking on the maps is needed.
	go func() {
		defer wgOut.Done()
		for chobj := range chOut {
			// Author count.
			(*autherCountMap)[chobj.Key] = chobj.AC
			// linksout: the set of ids this paper references.
			outSet := hashset.New()
			for _, s := range *chobj.S {
				outSet.Add(s)
			}
			(*linksoutRef)[chobj.Key] = outSet
			// linksin: register this paper as a citer of each referenced id.
			for _, sid := range *chobj.S {
				if set, ok := (*linksinRef)[sid]; ok {
					set.Add(chobj.Key)
				} else {
					(*linksinRef)[sid] = hashset.New(chobj.Key)
				}
			}
		}
	}()

	// Feed lines into the work queue.
	for {
		line, rerr := br.ReadString('\n')
		// BUG FIX: hand over any partial last line first — the old loop
		// dropped a final line that lacked a trailing '\n'.
		if len(line) > 0 {
			ch <- line
		}
		if rerr != nil {
			if rerr != io.EOF {
				// BUG FIX: non-EOF read errors were silently swallowed.
				log.Printf("Error: %s\n", rerr)
			}
			break
		}
	}
	close(ch)
	wg.Wait()
	close(chOut)
	wgOut.Wait()
	log.Println("read all links:", time.Since(startTime))
	return
}

// Mathod2_try loads the full citation graph via ReadAllLinksChanRef and
// logs how many entries ended up in each of the three resulting maps.
func Mathod2_try() {
	outRef, inRef, authorCounts := ReadAllLinksChanRef()
	log.Println("linksoutRef", len(*outRef))
	log.Println("linksinRef", len(*inRef))
	log.Println("auther exit count", len(*authorCounts))
}

// transD is the work item handed to the CalPageD workers: one focal node
// together with the set of nodes it links out to (its references).
type transD struct {
	NodeId            string       // focal paper id
	Node_linksout_set *hashset.Set // ids this paper references
}

// CalPageD computes the disruption-index counts for every page. A page is
// scored only if it both cites others (has linksout) and is cited by others
// (has linksin). Results stream to all_page_per_D_v2.gz as CSV rows:
//   id, ni, nj, nk, authorCount, linksoutSize, linksinSize
// where ni = citers ignoring this paper's references, nj = citers also
// citing its references, nk = papers citing its references but not it.
func CalPageD() {
	linksoutRef, linksinRef, autherCountMap := ReadAllLinksChanRef()
	chin := make(chan transD, 1000)
	chout := make(chan []string, 1000)
	wg := sync.WaitGroup{}
	wg.Add(1)
	// Single writer goroutine: gzip-compressed CSV output.
	go func() {
		defer wg.Done()
		f, err := os.Create("all_page_per_D_v2.gz")
		if err != nil {
			// BUG FIX: the Create error was ignored; a nil *os.File would
			// have panicked on the first flush. Drain chout so the compute
			// workers do not block forever on a full channel.
			log.Printf("Error: %s\n", err)
			for range chout {
			}
			return
		}
		// BUG FIX: the underlying file itself was never closed (only the
		// gzip writer was). Defers run in LIFO order: csv flush happens
		// before w.Close, which happens before f.Close.
		defer f.Close()
		w := gzip.NewWriter(f)
		defer w.Close()

		WriterCsv := csv.NewWriter(w)
		count := 0
		for strs := range chout {
			if err := WriterCsv.Write(strs); err != nil {
				// BUG FIX: write errors were silently dropped.
				log.Printf("Error: %s\n", err)
			}
			count += 1
			if count%10000 == 0 {
				WriterCsv.Flush()
				log.Println("写入", count)
			}
		}
		WriterCsv.Flush()
		if err := WriterCsv.Error(); err != nil {
			log.Printf("Error: %s\n", err)
		}
		log.Println("DONE")
	}()
	const threadCount = 40
	wgin := sync.WaitGroup{}
	wgin.Add(threadCount)
	for i := 0; i < threadCount; i++ {
		go func() {
			defer wgin.Done()
			for obj := range chin {
				nodeId := obj.NodeId
				nodeLinksout := obj.Node_linksout_set
				// Set of papers citing this one; skip uncited pages.
				nodeLinksin := (*linksinRef)[nodeId]
				if nodeLinksin == nil {
					continue
				}
				var (
					ni, nj, nk int64
				)
				// ni/nj: for each citer, does its reference list intersect
				// this paper's own references (nj) or not (ni)?
				for _, citer := range nodeLinksin.Values() {
					citerLinksout := (*linksoutRef)[citer.(string)]
					if Is_intersection(citerLinksout, nodeLinksout) {
						nj += 1
					} else {
						ni += 1
					}
				}
				// nk: union of the citers of every reference ...
				refCiters := hashset.New()
				for _, ref := range nodeLinksout.Values() {
					// BUG FIX: a referenced id may have no linksin entry;
					// the old code called Values() on a nil set and panicked.
					if s := (*linksinRef)[ref.(string)]; s != nil {
						refCiters.Add(s.Values()...)
					}
				}
				// ... minus those that also cite this paper directly.
				nk = Cal_uniq_count(refCiters, nodeLinksin)
				// columns: id, ni, nj, nk, author count, linksout size, linksin size
				chout <- []string{nodeId, strconv.FormatInt(ni, 10), strconv.FormatInt(nj, 10), strconv.FormatInt(nk, 10), strconv.Itoa((*autherCountMap)[nodeId]), strconv.Itoa(nodeLinksout.Size()), strconv.Itoa(nodeLinksin.Size())}
			}
		}()
	}

	log.Println(len(*linksoutRef))
	// Every paper with outgoing links is a candidate focal node.
	for nodeId, outSet := range *linksoutRef {
		chin <- transD{NodeId: nodeId, Node_linksout_set: outSet}
	}
	close(chin)
	wgin.Wait()
	close(chout)
	wg.Wait()
}

// Is_intersection reports whether sets a and b share at least one element.
// A nil set is treated as empty (the old code panicked on a nil `a`).
// Cost is O(len(a)) membership probes into b.
func Is_intersection(a, b *hashset.Set) bool {
	if a == nil || b == nil {
		return false
	}
	for _, v := range a.Values() {
		// BUG FIX: pass the element through as-is; the old v.(string)
		// assertion panicked on any non-string element, and Contains
		// accepts interface{} values directly.
		if b.Contains(v) {
			return true
		}
	}
	return false
}

// Cal_uniq_count counts the elements of a that are not in b, i.e. the size
// of the set difference a \ b. Nil sets are treated as empty (the old code
// panicked when either pointer was nil).
func Cal_uniq_count(a, b *hashset.Set) (count int64) {
	if a == nil {
		return 0
	}
	for _, v := range a.Values() {
		// BUG FIX: no v.(string) assertion — the old code panicked on
		// non-string elements; Contains takes interface{} values directly.
		if b == nil || !b.Contains(v) {
			count += 1
		}
	}
	return
}
