# Further processing of the Wikipedia corpus
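# Note: this script assumes the required NLTK data packages are available,
# e.g. downloaded via nltk.download("punkt"), nltk.download("wordnet"),
# nltk.download("averaged_perceptron_tagger") and nltk.download("tagsets").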

import nltk
from nltk import tokenize
from nltk.stem  import porter


import csv

small_words = ["is", "has", "as", "this", "the", "was"]  # List of words that should not be altered.

stemmer = porter.PorterStemmer()       # rule-based suffix stripper
lemmatizer = nltk.WordNetLemmatizer()  # WordNet-based lemmatizer
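# Note the difference: the stemmer cuts suffixes heuristically (e.g. "studies" -> "studi"),
# while the lemmatizer looks up a dictionary form (e.g. "studies" -> "study").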

myfile = open("corpus.csv", "r", encoding="utf8")
mycsvreader = csv.reader(myfile, delimiter='\t', quotechar='|')

nltk.help.upenn_tagset(".*")  # Prints explanations of the Penn Treebank POS tags


counter = 0

for row in mycsvreader:
    counter += 1
    if counter > 6:  # Termination condition: only process the first six rows.
        break
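    # A usable row is expected to have three columns; the last column holds the text.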
    if len(row) == 3 and row[-1] != "":
        text = row[-1]
        sents = tokenize.sent_tokenize(text)
        tokens = tokenize.word_tokenize(text)
        print(text)
        print("Info: Dieser Abschnitt besteht aus",len(tokens), "Tokens und", len(sents), "Sätzen.")
        print("Dies sind die Sätze:")
        for s in sents:
            print("***")  
            print(s)
        print("--------------------------------------------------------------------------------------")
        print("\n")            
        print("{:25} {:25} {:25} {:5}".format("word","stemmed","lemmatized","POS-tag"))
        print("--------------------------------------------------------------------------------------")
        for (word, tag) in nltk.pos_tag(tokens):
            if word not in small_words:
                stem = stemmer.stem(word)
                lem = lemmatizer.lemmatize(word)
            else:
                # Words on the stop list are passed through unchanged.
                stem = word
                lem = word
            print("{:25} {:25} {:25} {:5}".format(word, stem, lem, tag))
        print("--------------------------------------------------------------------------------------")
        input()  # Pauses after displaying the information for one text entry; press Enter to continue.
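
myfile.close()  # Release the file handle once all rows have been processed.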
        

    
