#!/usr/bin/env python
# coding: utf-8
#
# All the IPython Notebooks in **[Python Natural Language Processing](https://github.com/milaan9/Python_Python_Natural_Language_Processing)** lecture series by **[Dr. Milaan Parmar](https://www.linkedin.com/in/milaanparmar/)** are available @ **[GitHub](https://github.com/milaan9)**
#
#
# # 01 Tokenization by Python
# - **Tokenization is a way of separating a piece of text into smaller units called tokens. Tokens can be words, characters, or subwords.**
# - **Tokenization is the act of breaking a sequence of text into pieces such as words, keywords, phrases, and symbols. Tokens can be individual words, phrases, or even whole sentences. In the process of tokenization, some characters, such as punctuation marks, are discarded. The sketch below illustrates word-level and character-level tokens.**
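# As a quick illustration of the different granularities (a minimal sketch using only the Python standard library; real subword tokenizers such as BPE require a trained model and are not shown here):
# In[ ]:
sample = "Tokenization splits text"
# Word-level tokens: split on whitespace
print(sample.split())      # ['Tokenization', 'splits', 'text']
# Character-level tokens: every character becomes a token
print(list(sample[:12]))   # ['T', 'o', 'k', 'e', 'n', 'i', 'z', 'a', 't', 'i', 'o', 'n']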
# In[1]:
# Split on non-word characters with a regular expression
import re
text = "I\'ll always be there with you forever in your heart.!"
words = re.split(r'\W+', text)
print(words[:100])
# Note that `\W+` matches any non-word character, so the apostrophe in `I'll` causes a split and the trailing **`.!`** is discarded (leaving an empty string at the end of the list).
# **Remove punctuation and split into words**
# In[2]:
import string
import re
# split into words by white space
words = text.split()
# prepare regex for char filtering
re_punc = re.compile('[%s]' % re.escape(string.punctuation))
# remove punctuation from each word
stripped = [re_punc.sub('', w) for w in words]
print(stripped[:100])
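# An equivalent approach from the standard library uses `str.translate` with a translation table built by `str.maketrans` (a sketch; it produces the same output as the regex above and is typically faster):
# In[ ]:
# Map every punctuation character to None and apply the table to each word
table = str.maketrans('', '', string.punctuation)
stripped2 = [w.translate(table) for w in words]
print(stripped2[:100])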
# In[3]:
# Keep only printable characters: the regex matches anything NOT in string.printable
re_print = re.compile('[^%s]' % re.escape(string.printable))
result = [re_print.sub('', w) for w in words]
print(result)
# In[4]:
# Normalizing Case
# split into words by white space
words = text.split()
# convert to lower case
words = [word.lower() for word in words]
print(words[:100])
# # Spacy
#
# spaCy is an open-source Python library for advanced natural language processing. It is used to build information extraction and natural language understanding systems, and to pre-process text for deep learning.
# Install:
# ```py
# !pip install -U spacy
# !python -m spacy download en_core_web_sm
# ```
#
# In[5]:
get_ipython().system('pip install -U spacy')
get_ipython().system('python -m spacy download en_core_web_sm')
import spacy
nlp = spacy.load('en_core_web_sm')
# `en_core_web_sm` is spaCy's small English pipeline: `en` = English, `core` = a general-purpose
# pipeline, `web` = trained on web text, `sm` = small model size.
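# A quick way to see what the loaded pipeline actually contains (a small sketch, assuming `en_core_web_sm` was installed above; the exact component list varies by spaCy version):
# In[ ]:
# List the processing components that run on every text
print(nlp.pipe_names)   # e.g. ['tok2vec', 'tagger', 'parser', 'attribute_ruler', 'lemmatizer', 'ner']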
# In[6]:
string = '"I\'ll always be there with you forever in your mind!"'
print(string)
# In[7]:
# Break the string into tokens and print each token's text
doc = nlp(s)
for token in doc:
    print(token.text, end=' | ')
# In[8]:
doc
# In[9]:
# spaCy keeps the e-mail address and the URL as single tokens
doc2 = nlp(u"I'm always here to help you all! Email:milaanparmar9@gmail.com or visit more at https://github.com/milaan9!")
for t in doc2:
    print(t)
# In[10]:
doc3 = nlp(u'A 5km NYC cab ride costs $10.30')
for t in doc3:
    print(t)
# In[11]:
doc4 = nlp(u"Let's visit St. Louis in the U.S. next year.")
for t in doc4:
    print(t)
# **See the number of tokens in the string**
# In[12]:
len(doc4)
# **See the size of the pipeline's shared vocabulary**
# In[13]:
# Number of lexemes stored in the shared Vocab (not the token count of this doc)
len(doc.vocab)
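# The Vocab is shared across all documents and grows lazily as new strings are seen (a small sketch; the exact counts depend on the model version):
# In[ ]:
before = len(nlp.vocab)
nlp(u'floccinaucinihilipilification')   # a word the vocab has probably not stored yet
print(before, '->', len(nlp.vocab))     # the count usually increases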
# **Print the third token of the string**
# In[14]:
doc5 = nlp(u'It is better to give than to receive.')
# Retrieve the third token:
doc5[2]
# **Print the third through fifth tokens**
# In[15]:
# Retrieve three tokens from the middle:
doc5[2:5]
# In[16]:
# Retrieve the last four tokens:
doc5[-4:]
# In[17]:
doc6 = nlp(u'My dinner was horrible.')
doc7 = nlp(u'Your dinner was delicious.')
# **`Doc` objects are immutable: we can't assign a token from one doc into a token slot of another**
# In[18]:
# Try to change "My dinner was horrible" to "My dinner was delicious"
doc6[3] = doc7[3]   # raises TypeError: Doc does not support item assignment
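# The assignment above raises a `TypeError` because spaCy `Doc` objects are immutable.
# To get the changed sentence, build a new string and re-tokenize it (a sketch):
# In[ ]:
# Span.text_with_ws keeps the trailing space after "was"
new_text = doc6[:3].text_with_ws + doc7[3].text + '.'
print(nlp(new_text))   # My dinner was delicious.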
# ### Named entities: explaining what each entity refers to
# In[19]:
# spacy.explain() gives a plain-English description of each entity label
doc8 = nlp(u'Apple to build a Hong Kong factory for $6 million')
for token in doc8:
    print(token.text, end=' | ')
print('\n----')
# doc.ents holds the named entities detected in the document
for ent in doc8.ents:
    print(ent.text + ' - ' + ent.label_ + ' - ' + str(spacy.explain(ent.label_)))
# Here "Apple" is labelled ORG (companies, agencies, institutions) and "Hong Kong" is labelled GPE (countries, cities, states), and so on.
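# Each entity is a `Span`, so it also carries character offsets into the original text (a quick sketch reusing `doc8` from above):
# In[ ]:
# Print each entity with its character start/end offsets and label
for ent in doc8.ents:
    print(ent.text, ent.start_char, ent.end_char, ent.label_)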
# In[20]:
len(doc8.ents)
# In[21]:
doc9 = nlp(u"Autonomous cars shift insurance liability toward manufacturers.")
# noun_chunks yields the base noun phrases in the document
for chunk in doc9.noun_chunks:
    print(chunk.text)
# In[22]:
doc10 = nlp(u"Red cars do not carry higher insurance rates.")
for chunk in doc10.noun_chunks:
    print(chunk.text)
# In[23]:
doc11 = nlp(u"He was a one-eyed, one-horned, flying, purple people-eater.")
for chunk in doc11.noun_chunks:
    print(chunk.text)
# In[24]:
# displacy.render with style='dep' draws the dependency parse:
# each token's part of speech and the syntactic relations between tokens
from spacy import displacy
doc = nlp(u'Apple is going to build a U.K. factory for $6 million.')
displacy.render(doc, style='dep', jupyter=True, options={'distance': 110})
# In[25]:
# style='ent' highlights the named entities inline in the text
doc = nlp(u'Over the last quarter Apple sold nearly 20 thousand iPods for a profit of $6 million.')
displacy.render(doc, style='ent', jupyter=True)
# In[26]:
doc = nlp(u'This is a sentence.')
displacy.serve(doc, style='dep')
# displacy.serve starts a local web server and blocks the notebook; in case ▶ runs too long, press ■ Stop
# # **Tokenization with NLTK**
# In[27]:
get_ipython().system('pip install nltk')
# In[28]:
# Tokenization of paragraphs/sentences
import nltk
# nltk.download("popular") # use this to download all popular libraries in nltk
nltk.download('all')
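# `nltk.download('all')` fetches every corpus and model and can take a while. If you only need the tokenizers below, downloading the `punkt` sentence-tokenizer model is enough (a lighter-weight alternative; some newer NLTK versions also need `punkt_tab`):
# ```py
# nltk.download('punkt')   # sufficient for sent_tokenize / word_tokenize
# ```
#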
# In[29]:
paragraph = """I have three visions for India. In 3000 years of our history, people from all over
the world have come and invaded us, captured our lands, conquered our minds.
From Alexander onwards, the Greeks, the Turks, the Moguls, the Portuguese, the British,
the French, the Dutch, all of them came and looted us, took over what was ours.
Yet we have not done this to any other nation. We have not conquered anyone.
We have not grabbed their land, their culture,
their history and tried to enforce our way of life on them.
Why? Because we respect the freedom of others. That is why my
first vision is that of freedom. I believe that India got its first vision of
this in 1857, when we started the War of Independence. It is this freedom that
we must protect and nurture and build on. If we are not free, no one will respect us.
My second vision for India’s development. For fifty years we have been a developing nation.
It is time we see ourselves as a developed nation. We are among the top 5 nations of the world
in terms of GDP. We have a 10 percent growth rate in most areas. Our poverty levels are falling.
Our achievements are being globally recognised today. Yet we lack the self-confidence to
see ourselves as a developed nation, self-reliant and self-assured. Isn’t this incorrect?
I have a third vision. India must stand up to the world. Because I believe that unless India
stands up to the world, no one will respect us. Only strength respects strength. We must be
strong not only as a military power but also as an economic power. Both must go hand-in-hand.
My good fortune was to have worked with three great minds. Dr. Vikram Sarabhai of the Dept. of
space, Professor Satish Dhawan, who succeeded him and Dr. Brahm Prakash, father of nuclear material.
I was lucky to have worked with all three of them closely and consider this the great opportunity of my life.
I see four milestones in my career"""
# In[30]:
# Tokenizing sentences
sentences = nltk.sent_tokenize(paragraph)
# Tokenizing words
words = nltk.word_tokenize(paragraph)
# In[31]:
sentences
# In[32]:
words
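# Comparing the two granularities on the same paragraph (a small sketch):
# In[ ]:
# Sentence-level vs word-level token counts
print(len(sentences), 'sentences')
print(len(words), 'word-level tokens')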