#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# Variant that changes the text color (phrase-highlighting UI).
import csv
csv.field_size_limit(1000000000)
import warnings
warnings.filterwarnings("ignore")
import time
import re
import spacy
import pytextrank
import nltk
import random
random.seed(0)
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')

from nltk.stem.wordnet import WordNetLemmatizer as WNL
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from operator import itemgetter
from flask import Flask, render_template, request, g
app = Flask(__name__)
from elasticsearch import Elasticsearch

def my_index(l, x, default=0):
    """Return the index of *x* in sequence *l*, or *default* if absent.

    EAFP rewrite: the original scanned the list twice (`x in l` followed by
    `l.index(x)`); a single `index` call with ValueError handling does one pass.
    """
    try:
        return l.index(x)
    except ValueError:
        return default

@app.route('/')
def form():
   """Serve the search form page (templates/form.html)."""
   return render_template('form.html')

# CSV columns (Japanese in original): 'TextA', 'Label', 'TextB', 'ID', 'BoW', 'Publisher', 'Title', 'PublishDate'
@app.route('/elasticsearch', methods = ['POST', 'GET'])
def search():
    """Search the security-report index for the posted query, summarize the
    hits with TextRank, and render the top sentences with key phrases
    highlighted.

    Side effects: writes the raw hits to 'psr1.csv' and the selected
    sentences to 'sentence.txt' (both consumed by the /nonchange route).
    """
    t1 = time.time()
    if request.method == 'POST':
        # BUG FIX: the submitted query was read but the two ES queries below
        # were hard-coded to 'Mirai mirai' (debug leftover); use the input.
        query_text = str(request.form['raw_text'])
        # NOTE(security): credentials are hard-coded in source; move them to
        # environment variables or a config file.
        es = Elasticsearch(host = '153.120.135.103', port = 9200, http_auth = ('elastic', 'gui3DhVRfd9F18n30o34'))
        # Label of the single best-matching document (empty if no hits,
        # instead of raising NameError as before).
        label = ''
        f_res = es.search(index = 'securityreports-2020-1203-2200', body = {
            '_source': ['Label'],
            'size': 1,
            'query': {'match': {'TextA': query_text}}})
        for doc in f_res['hits']['hits']:
            label = doc['_source']['Label']
        res = es.search(index = 'securityreports-2020-1203-2200', body = {
            '_source': ['Title', 'TextB', 'Publisher', 'TextA', 'BoW'],
            'size': 10000,
            'query': {'match': {'TextA': query_text}}})
        hit_num = res['hits']['total']['value']
        # Cache the hits as CSV; the header is taken from the first document.
        with open('psr1.csv', 'w', encoding='UTF-8', errors='ignore', newline='') as f:
            writer = None
            for doc in res['hits']['hits']:
                my_dict = doc['_source']
                if writer is None:
                    writer = csv.DictWriter(f, my_dict.keys())
                    writer.writeheader()
                writer.writerow(my_dict)
        a_list = []  # raw (unstemmed) report texts
        c_list = []  # per-report sentence lists ('/#'-delimited in TextB)
        # FIX: open the cache with a context manager (was left unclosed).
        with open('psr1.csv', encoding='UTF-8', errors='ignore') as csvfile:
            reader = csv.reader(csvfile)
            next(reader, None)  # skip the header row (was `del list[0]`)
            for row in reader:
                a_list.append(row[3])           # TextA
                c_list.append(row[1].split('/#'))  # TextB sentences
        # Cap the TextRank corpus at 50 random reports to bound runtime.
        if len(a_list) >= 50:
            reports = ' '.join(random.sample(a_list, 50))
        else:
            reports = ' '.join(a_list)
        nlp = spacy.load('en_core_web_sm', disable = ['ner'])
        tr = pytextrank.TextRank()
        nlp.add_pipe(tr.PipelineComponent, name="textrank", last=True)
        doc = nlp(reports)
        # Keep the top-ranked phrases only.  (Removed unused WNL() instance.)
        limit_phrases = 15
        unit_vector = []
        phrases = []
        for p in doc._.phrases:
            unit_vector.append(p.rank)
            phrases.append(p.text)
            if len(phrases) == limit_phrases:
                break
        sum_ranks = sum(unit_vector)
        # Guard against an empty phrase list (was a ZeroDivisionError).
        rank_vector = [rank / sum_ranks for rank in unit_vector] if sum_ranks else []
        # Score every cached sentence by its phrase occurrences weighted by
        # the (unnormalized) phrase ranks.
        sent_rank = {}
        sent_text = {}
        sent_id = 0
        for sents in c_list:
            for sent in sents:
                score = 0
                for idx, p in enumerate(phrases):
                    score += sent.count(p) * unit_vector[idx]
                sent_rank[sent_id] = score
                sent_text[sent_id] = sent
                sent_id += 1
        # Top 10 sentences by score.
        limit_sentences = 10
        sentence = [sent_text[sid] for sid, _ in
                    sorted(sent_rank.items(), key=itemgetter(1), reverse=True)[:limit_sentences]]
        with open('sentence.txt', 'w', encoding='UTF-8', errors='ignore', newline='') as f:
            f.write('\n'.join(sentence))
        # Wrap each phrase occurrence in an orange <span>.
        # FIX: the highlight loop was duplicated verbatim, re-wrapping already
        # highlighted matches in nested spans; run it once, and escape the
        # phrase so regex metacharacters in it cannot break the pattern.
        htmltext = []
        for s in sentence:
            sent_box = s + ' '
            for p in phrases:
                pattern = re.compile(re.escape(p), re.IGNORECASE)
                sent_box = pattern.sub('<span style="font-weight: 600; color:orange;">' + p + '</span>', sent_box)
            htmltext.append(sent_box)
        t2 = time.time()
        print(t2-t1)
        return render_template("search.html", label = label, sentence = sentence, hits = hit_num, phrase_list = phrases, relascore = rank_vector, html = htmltext)
    # FIX: a plain GET previously returned None (Flask 500); show the form.
    return render_template('form.html')

@app.route('/nonchange', methods = ['POST', 'GET'])
def ten_phrases():
    """Re-rank the cached reports against phrases extracted from the
    user-selected sentences, then render the top 10 reports with their
    sentences shaded by phrase density and phrases highlighted in red.

    Reads 'psr1.csv' and 'sentence.txt' produced by /elasticsearch, and the
    phrase-to-BoW-index dictionary 'BOW辞書1.csv'.
    """
    t1 = time.time()
    if request.method == 'POST':
        titles = []     # row[0] Title
        stemmed = []    # row[1] TextB ('/#'-delimited sentences)
        publishers = [] # row[2] Publisher
        raw_texts = []  # row[3] TextA
        bows = []       # row[4] BoW ('/'-delimited counts)
        # FIX: open the cache with a context manager (was left unclosed).
        with open('psr1.csv', encoding='UTF-8', errors='ignore') as csvfile:
            reader = csv.reader(csvfile)
            next(reader, None)  # skip the header row (was `del list[0]`)
            for row in reader:
                titles.append(row[0])
                stemmed.append(row[1])
                publishers.append(row[2])
                raw_texts.append(row[3])
                bows.append(row[4])
        c_list = [t.split('/#') for t in stemmed]
        nlp = spacy.load('en_core_web_sm', disable = ['ner'])
        tr = pytextrank.TextRank()
        nlp.add_pipe(tr.PipelineComponent, name="textrank", last=True)
        with open('sentence.txt', encoding='UTF-8', errors='ignore') as f:
            s_list = [s.strip() for s in f]
        # Indices of the sentences the user ticked in the previous page.
        selected = [int(s) for s in request.form.getlist('sentences')]
        raw_text = ' '.join(s_list[i] for i in selected)
        doc = nlp(raw_text)
        # Top TextRank phrases of the selected sentences.
        limit_phrases = 15
        unit_vector = []
        phrases = []
        for p in doc._.phrases:
            unit_vector.append(p.rank)
            phrases.append(p.text)
            if len(phrases) == limit_phrases:
                break
        sum_ranks = sum(unit_vector)
        # Guard against an empty phrase list (was a ZeroDivisionError).
        if sum_ranks:
            unit_vector = [rank / sum_ranks for rank in unit_vector]
        # Phrase -> BoW column index dictionary.
        dic1 = []
        with open('BOW辞書1.csv', encoding='UTF-8', errors='ignore') as file:
            for row in csv.reader(file):
                dic1.extend(row)
        # Score each report: BoW count of each phrase times its weight.
        # BUG FIX: int(unit_vector[...]) truncated the normalized (<1.0)
        # weights to 0, zeroing every score and making the ranking random.
        text_rank = {}
        for i in range(len(raw_texts)):
            bow = bows[i].split('/')
            score = 0.0
            for idx, p in enumerate(phrases):
                # NOTE(review): my_index falls back to 0 when a phrase is
                # missing from the dictionary, silently using bow[0] —
                # confirm that is intended.
                n = my_index(dic1, p)
                score += int(bow[n]) * unit_vector[idx]
            text_rank[i] = score / len(c_list)
        rank_list = sorted(text_rank.items(), key=itemgetter(1), reverse=True)
        htmltext = []
        title_list = []
        publish_list = []
        # Render the 10 highest-scoring reports (was an O(n) .index() call
        # per iteration just to stop after 10).
        for doc_id, _ in rank_list[:10]:
            title_list.append(titles[doc_id])
            publish_list.append(publishers[doc_id])
            sent_box = ''
            for sent in c_list[doc_id]:
                # Shade the sentence by how many phrase occurrences it has.
                hits = sum(sent.lower().count(p) for p in phrases)
                if hits == 0:
                    sent_box += sent + ' '
                elif hits == 1:
                    sent_box += '<span style="background: linear-gradient(transparent 90%, #F6AD3C 0%);">' + sent + '</span>' + ' '
                elif hits == 2:
                    sent_box += '<span style="background: linear-gradient(transparent 60%, #F6AD3C 0%);">' + sent + '</span>' + ' '
                else:
                    sent_box += '<span style="background: linear-gradient(transparent 0%, #F6AD3C 0%);">' + sent + '</span>' + ' '
            # FIX: highlighting ran twice per sentence over the whole
            # accumulated buffer, repeatedly nesting spans around earlier
            # matches (and O(n^2) work); substitute once per report, with
            # the phrase regex-escaped.
            for p in phrases:
                pattern = re.compile(re.escape(p), re.IGNORECASE)
                sent_box = pattern.sub('<span style="font-weight: 600; color:red;">' + p + '</span>', sent_box)
            htmltext.append(sent_box)
        t2 = time.time()
        print(t2-t1)
        return render_template("nonchange.html", phrase_list = phrases, text_list = htmltext, title = title_list, publish = publish_list)
    # FIX: a plain GET previously returned None (Flask 500); show the form.
    return render_template('form.html')

if __name__ == "__main__":
    app.run(host='153.120.135.103', port = 80, debug=True)