1 vote

POS tag filtering

# Dummy data
text = "Sukanya is getting married next year. " \
       "Marriage is a big step in one's life. " \
       "It is both exciting and frightening. " \
       "But friendship is a sacred bond between people. " \
       "It is a special kind of love between us. " \
       "Many of you must have tried searching for a friend " \
       "but never found the right one."
import nltk 
from nltk.corpus import stopwords 
from nltk.tokenize import word_tokenize, sent_tokenize 
stop_words = set(stopwords.words('english'))

def get_pos_tags(text):
    tokenized = sent_tokenize(text)
    for i in tokenized:

        # The word tokenizer finds the words
        # and punctuation in a string
        wordsList = nltk.word_tokenize(i)

        # removing stop words from wordsList
        wordsList = [w for w in wordsList if w not in stop_words]

        # run the part-of-speech (POS) tagger
        tagged = nltk.pos_tag(wordsList)

    return tagged

df["tagged"] = df["text"].apply(lambda x: get_pos_tags(x))

I have a dataframe (df). In the tagged column, each row is a list of lists with (word, tag) tuples inside.

Example row:

[[('Sukanya', 'NNP'), ('getting', 'VBG'), ('married', 'VBN'), ('next', 'JJ'), ('year', 'NN')],
[('Marriage', 'NN'), ('big', 'JJ'), ('step', 'NN'), ('one', 'CD'), ('’', 'NN'), ('life', 'NN')],
[('It', 'PRP'), ('exciting', 'VBG'), ('frightening', 'VBG')], 
[('But', 'CC'), ('friendship', 'NN'), ('sacred', 'VBD'), ('bond', 'NN'), ('people', 'NNS')], 
[('It', 'PRP'), ('special', 'JJ'), ('kind', 'NN'), ('love', 'VB'), ('us', 'PRP')], 
[('Many', 'JJ'), ('must', 'MD'), ('tried', 'VB'), ('searching', 'VBG'), ('friend', 'NN'), ('never','RB'),
 ('found', 'VBD'), ('right', 'RB'), ('one', 'CD')]]

Now I'm trying to filter the words tagged as adjective, noun, verb, or adverb into a separate column filtered_tags.

def filter_pos_tags(tagged_text):
    filtered_tags = []
    for i in tagged_text:
        for j in i:
            # keep adjectives (J), verbs (V), nouns (N) and adverbs (R)
            if j[-1].startswith(("J", "V", "N", "R")):
                filtered_tags.append(j[0])
    return filtered_tags

df["filtered_tags"] = df["tagged"].apply(lambda x: get_pos_tags(x))

The Output I got:

['Sukanya', 'getting', 'married', 'next', 'year', 'Marriage', 'big', 'step', 'life', 'exciting', 'frightening', 'friendship', 'sacred', 'bond', 'people', 'special', 'kind', 'love', 'Many', 'tried', 'searching', 'friend', 'found', 'right']

Required Output

[['Sukanya', 'getting', 'married', 'next', 'year'], ['Marriage', 'big', 'step', 'life'], ['exciting', 'frightening'], ['friendship', 'sacred', 'bond', 'people'], ['special', 'kind', 'love'], ['Many', 'tried', 'searching', 'friend'], ['found', 'right']]
Why are you calling get_pos_tags again? df["filtered_tags"] = df["tagged"].apply(lambda x: get_pos_tags(x)) Shouldn't you be calling filter_pos_tags here? - oxalorg

2 Answers

2 votes

Give this a shot:

import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize

text = """Sukanya is getting married next year.
Marriage is a big step in one's life.
It is both exciting and frightening.
But friendship is a sacred bond between people.
It is a special kind of love between us.
Many of you must have tried searching for a friend
but never found the right one."""

stop_words = set(stopwords.words('english'))
def get_pos_tags(text):
    tokenized = sent_tokenize(text)
    for i in tokenized:
        # The word tokenizer finds the words
        # and punctuation in a string
        wordsList = nltk.word_tokenize(i)
        # removing stop words from wordsList
        wordsList = [w for w in wordsList if w not in stop_words]
        # run the part-of-speech (POS) tagger with the universal tagset
        tagged = nltk.pos_tag(wordsList, tagset='universal')
    return tagged

def get_filtered(tagged_text):
    valid_tags = {'ADJ', 'NOUN', 'VERB', 'ADV'}
    filtered = filter(lambda word_entry: word_entry[1] in valid_tags, tagged_text)
    final = map(lambda match: match[0], filtered)
    return list(final)

df = pd.DataFrame({
    'text': text.split("\n")
})
df["tagged"] = df["text"].apply(lambda x: get_pos_tags(x))
df['filtered'] = df['tagged'].apply(get_filtered)
print(df['filtered'])

The output is:

0    [Sukanya, getting, married, next, year]
1                [Marriage, big, step, life]
2                    [exciting, frightening]
3         [friendship, sacred, bond, people]
4                      [special, kind, love]
5     [Many, must, tried, searching, friend]
6                      [never, found, right]
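
If the tokenizer models, stopword list, or tagger data are not installed yet, NLTK raises a LookupError on first use. The required resources can be fetched once up front (a minimal sketch; the resource names assume a standard NLTK installation):

import nltk

# one-time downloads for the tokenizer, stopword list,
# POS tagger model and the universal tagset mapping
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('universal_tagset')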
0 votes

If you change your function to append a new sub-list to filtered_tags for each item in tagged_text, you can achieve what you expect.

Using the following filter_pos_tags() function instead of yours will make it work for you.

def filter_pos_tags(tagged_text):
    filtered_tags = []
    for index, i in enumerate(tagged_text):
        # start a new sub-list for each tagged sentence
        filtered_tags.append([])
        for j in i:
            # keep adjectives (J), verbs (V), nouns (N) and adverbs (R)
            if j[-1].startswith(("J", "V", "N", "R")):
                filtered_tags[index].append(j[0])
    return filtered_tags
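
Then apply this function (rather than get_pos_tags) when creating the new column:

df["filtered_tags"] = df["tagged"].apply(filter_pos_tags)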

Note:

The example row you provided only has 6 elements, whereas the dummy data seems to contain 7 sentences.
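
For reference, the same filtering can also be written as a nested list comprehension; this is just an equivalent sketch of the loop above, using the same tag prefixes:

def filter_pos_tags(tagged_text):
    # keep words whose tag starts with J (adjective), V (verb), N (noun) or R (adverb)
    return [[word for word, tag in sentence if tag.startswith(("J", "V", "N", "R"))]
            for sentence in tagged_text]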