#!/usr/bin/env python
# coding: utf-8

# # Welcome
#
# Erik Arakelyan | Nadav Borenstein | Ruixiang Cui | Karolina Stańczak
#
# To run a command inside the running course Docker container, first find its container id, then use `docker exec`:
#
# docker ps -q
# docker exec -it _container-id_ _command_
# docker exec -it 8c16b8de4771 python --version
#
#
# ### Managing your changes
#
# There are several ways to keep your changes within the official repo organised. Some of them are:
# * Create your own [fork](https://help.github.com/en/articles/fork-a-repo)
# of the repo. The fork can be [synced](https://help.github.com/en/articles/syncing-a-fork?query=f) with the official course repo when new changes are available. Meanwhile, you can also maintain your changes in your forked repo.
# * Another option is to keep your changes only in a local branch (git checkout -b _your-branch-name_) on your computer. Each time there is a change in the course repo, you can pull the repo and merge the changes into your branch (git merge origin/master), as sketched below.
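#
# For example, a typical sequence for syncing a fork looks like this (a sketch; _upstream_ is just a conventional remote name, and the URL placeholder must be replaced with the official repo's URL):
#
# git remote add upstream _official-course-repo-url_
# git fetch upstream
# git checkout master
# git merge upstream/master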
# ----
#
# ## Tokenisation
# Tokenisation is an important pre-processing step for NLP models.
#
# You can tokenise text at different levels - split to sentences, tokens, subwords, etc.
#
# There are a lot of corner cases, as well as language-specific and/or domain-specific cases, which have to be handled in different ways.
#
# In[10]:
import re

text_sentences = "The office is open between 10 a.m. and 1 p.m. every day... Please, be respectful of the hours."
# A naive rule-based split on '.', '!' and '?' breaks on abbreviations
# like "a.m." and on ellipses ("...").
re.split(r'(\.|!|\?)', text_sentences)
# Luckily, there are libraries providing tokenisation functionality that handles most of these cases. Let's look at two of the most common libraries for tokenisation:
#
# ### spaCy
# In[11]:
# download the language models, this can be done for other languages as well
get_ipython().system('python -m spacy download en_core_web_sm # You might have to restart the notebook if the file cannot be found')
get_ipython().system('python -m spacy download fr_core_news_sm')
# In[12]:
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(text_sentences)
list(doc.sents)
# ### NLTK
# In[13]:
import nltk

nltk.download('punkt')  # fetch the sentence tokeniser models (no-op if already downloaded)
nltk.tokenize.sent_tokenize(text_sentences)
# #### Word-level tokenisation
# In[14]:
text = "Mr. O'Neill thinks that the boys' stories about Chile's capital aren't amusing... Good muffins cost $3.88 in New York. Please buy me two of them!!! Thanks.."
text_tweet = "https://t.co/9z2J3P33Uc Hey @NLPer! This is a #NLProc tweet :-D"
noisy_tweet = "UserAnonym123 What's your timezone_!@# !@#$%^&*()_+ 0123456"
print('Common English tokenisation')
print(nltk.word_tokenize(text))
print([token.text for token in nlp(text)])
print('\nTweet tokenisation')
print(nltk.word_tokenize(text_tweet))
print([token.text for token in nlp(text_tweet)])
print('\nTokenisation of a noisy tweet')
print(nltk.word_tokenize(noisy_tweet))
print([token.text for token in nlp(noisy_tweet)])
# Both libraries perform similarly when tokenising common English text, so the choice often comes down to which other features you need.
#
# When it comes to tweets, the default NLTK tokeniser performs poorly, but NLTK also provides the TweetTokenizer, which is suited for tweet tokenisation.
# In[15]:
tweet_tokenizer = nltk.tokenize.TweetTokenizer()
print(tweet_tokenizer.tokenize(text_tweet))
print(tweet_tokenizer.tokenize(noisy_tweet))
# As you saw, the above tokenisers split negation contractions such as "aren't" into "are" and "n't", as per the Penn Treebank guidelines. Such tokenisation can be useful when building sentiment classification or information extraction systems.
#
# Question:
# - How should we split "I bought a 12-ft boat!"? In 1, 2, or 3 tokens?
# - How should we tokenise "It is a 2850m distance flight.", "The maximum speed on the autobahn is 130km/h."?
#
# Again, there is a rule that units should be split from numerical values. Let's test how the tokenisers handle this:
# In[16]:
print('Spacy tokeniser')
print([token.text for token in nlp("I bought a 12-ft boat!")])
print([token.text for token in nlp("It is a 2850m distance flight.")])
print([token.text for token in nlp("The maximum speed on the autobahn is 130km/h.")])
print('\nNLTK simple tokeniser')
print(nltk.tokenize.word_tokenize("I bought a 12-ft boat!"))
print(nltk.tokenize.word_tokenize("It is a 2850m distance flight."))
print(nltk.tokenize.word_tokenize("The maximum speed on the autobahn is 130km/h."))
# #### Language dependent tokenisation
#
# While some languages have similar rules for tokenisation, other languages are quite different.
# In French, some words that were originally composed of more than one lexical unit now form a single lexical unit, and should thus be recognised as a single token. In other cases, the apostrophe marks a contraction, and the word should be split at it.
#
# The following sentence "On nous dit qu’aujourd’hui c’est le cas, encore faudra-t-il l’évaluer.", which means "We are told that this is the case today, it still needs to be assessed." has the following correct tokenisation:
#
# 'On', 'nous', 'dit', 'qu’', 'aujourd’hui', 'c’', 'est', 'le', 'cas', ',', 'encore', 'faudra', '-t-il', 'l’', 'évaluer', '.'
#
# Explanation:
# - 'aujourd’hui' was originally composed of more than one lexical unit but nowadays forms a single lexical unit, so it should be recognised as a single token
# - in qu’aujourd’hui (that today), que (that) appears in the contracted form qu’ and has to be separated from the rest of the word
# - c’est (this is) is ce (c’) combined with est and has to be split into two tokens
# - l’évaluer (evaluate it) is two words, where le appears in the contracted form l’ and has to be separated
# - faudra-t-il (will it take) consists of faudra (will take) and the inverted pronoun il; the -t- only prevents two vowels from clashing, so it is kept attached as part of the single token '-t-il'
# In[17]:
print([nltk.tokenize.word_tokenize("On nous dit qu’aujourd’hui c’est le cas, encore faudra-t-il l’évaluer.")])
print([token.text for token in nlp("On nous dit qu’aujourd’hui c’est le cas, encore faudra-t-il l’évaluer.")])
# Let's use the language-specific tokenisation:
# In[18]:
nlp_fr = spacy.load("fr_core_news_sm")
print([token.text for token in nlp_fr("On nous dit qu’aujourd’hui c’est le cas, encore faudra-t-il l’évaluer.")])
nltk.tokenize.word_tokenize("On nous dit qu’aujourd’hui c’est le cas, encore faudra-t-il l’évaluer.", language='french')
# ### Transformers
#
# [HuggingFace's](https://huggingface.co/docs/transformers/index) "transformers" is a Python package for training, using, and deploying Transformer-based models (more on that in future lectures). Each transformer model (e.g. BERT, RoBERTa) has its own tokenisation module that should be used together with the model. That is, to use the transformer model BERT, one must tokenise its inputs with the BERT tokeniser.
# In[19]:
from transformers import AutoTokenizer
bert_tokeniser = AutoTokenizer.from_pretrained("bert-base-uncased") # The tokeniser of the model "bert-base-uncased"
# In[20]:
tokens = bert_tokeniser.tokenize("The maximum speed on the autobahn is 130km/h.")
print(tokens)
# The "bert-base-uncased" tokeniser works differently from the tokenisers of nltk and spacy. Instead of splitting a sentence following a set of rules, it uses a (learned) vocabulary, a set of words that it knows. The tokeniser tries to break the sentence into tokens from its vocabulary. If the tokeniser encounters a work that does not appear in the vocabulary, the work will be split into "word-pieces", where each word piece belongs to the vocabulary. For example, the word "autobahn" is not part of the vocabulary, so the tokeniser split it into "auto" and "bahn" (the "##" means that "bahn" should be merged with the token that comes before it when reconstructing the original sentence from the tokens).
# In[21]:
print("autobahn" in bert_tokeniser.vocab)
print("auto" in bert_tokeniser.vocab)
print("bahn" in bert_tokeniser.vocab)
# "bert-base-uncased" is an English only model, so it can't deal well with sentences in other languages:
# In[22]:
tokens = bert_tokeniser.tokenize("On nous dit qu’aujourd’hui c’est le cas, encore faudra-t-il l’évaluer.")
print(tokens)
# CamemBERT, however, is a French language model, so it can tokenise the sentence in a more meaningful (but far from perfect) way.
# Here, the '▁' prefix means that the token starts a new word.
# In[23]:
camembert_tokeniser = AutoTokenizer.from_pretrained("camembert-base")
tokens = camembert_tokeniser.tokenize("On nous dit qu’aujourd’hui c’est le cas, encore faudra-t-il l’évaluer.")
print(tokens)
# Some models are multilingual, and their tokenisers can process sentences from several languages. "bert-base-multilingual-uncased" (M-BERT) and "xlm-roberta-base" (XLM-RoBERTa) were trained on over 100 different languages!
# In[24]:
mbert_tokeniser = AutoTokenizer.from_pretrained("bert-base-multilingual-uncased")
en_tokens = mbert_tokeniser.tokenize("The maximum speed on the autobahn is 130km/h.")
fr_tokens = mbert_tokeniser.tokenize("On nous dit qu’aujourd’hui c’est le cas, encore faudra-t-il l’évaluer.")
heb_tokens = mbert_tokeniser.tokenize("אחד הדברים שאני הכי אוהב בקופנהגן זה מאפים עם הל")  # "One of the things I love most about Copenhagen is pastries with cardamom"
print("en:", en_tokens)
print("fr:", fr_tokens)
print("heb:", heb_tokens)
# In[25]:
xlm_tokeniser = AutoTokenizer.from_pretrained("xlm-roberta-base")
en_tokens = xlm_tokeniser.tokenize("The maximum speed on the autobahn is 130km/h.")
fr_tokens = xlm_tokeniser.tokenize("On nous dit qu’aujourd’hui c’est le cas, encore faudra-t-il l’évaluer.")
heb_tokens = xlm_tokeniser.tokenize("אחד הדברים שאני הכי אוהב בקופנהגן זה מאפים עם הל")
print("en:", en_tokens)
print("fr:", fr_tokens)
print("heb:", heb_tokens)
# #### References:
# - Introduction to Spacy and its features: https://spacy.io/usage/spacy-101
# - NLTK tokenisation functionalities: https://www.nltk.org/api/nltk.tokenize.html
# - HuggingFace's transformers and tokenisers: https://huggingface.co/docs/transformers/main_classes/tokenizer
# - On rules and different languages: http://ceur-ws.org/Vol-2226/paper9.pdf
# - Why do we need language-specific tokenisation: https://stackoverflow.com/questions/17314506/why-do-i-need-a-tokenizer-for-each-language
# ---
# ## Introduction to [PyTorch](https://pytorch.org/)
# In[26]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(123)
# ### Dataset and Bag-of-Words
# In[27]:
data = [("me gusta comer en la cafeteria".split(), "SPANISH"),
("Give it to me".split(), "ENGLISH"),
("No creo que sea una buena idea".split(), "SPANISH"),
("No it is not a good idea to get lost at sea".split(), "ENGLISH")]
test_data = [("Yo creo que si".split(), "SPANISH"),
("it is lost on me".split(), "ENGLISH")]
label_to_ix = {"SPANISH": 0, "ENGLISH": 1}
# A common way to read in data in PyTorch is to use the `torch.utils.data.Dataset` and `torch.utils.data.DataLoader` classes. Since we are working with a tiny dataset today, we skip this step; a minimal sketch is shown below for reference.
#
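# For reference, a minimal sketch of what that could look like for this toy data (the class name and the identity `collate_fn` are our own choices, not part of the course code):

# In[ ]:

from torch.utils.data import Dataset, DataLoader

class ToyLanguageDataset(Dataset):
    """Wraps the (tokenised sentence, label) pairs defined above."""
    def __init__(self, pairs):
        self.pairs = pairs

    def __len__(self):
        return len(self.pairs)

    def __getitem__(self, idx):
        return self.pairs[idx]

# batch_size=1 with an identity collate_fn, because the raw samples are
# (list-of-tokens, label) pairs of varying length
loader = DataLoader(ToyLanguageDataset(data), batch_size=1, collate_fn=lambda batch: batch[0])
for sent, label in loader:
    print(label, sent)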
# Now we represent the data as Bag-of-Words (BoW), a simple way of extracting features from text by describing the occurrence of words within a document. Intuitively, documents with similar content have similar BoW vectors.
# In[28]:
# Map each word in the vocab to a unique integer,
# which will be its index in the Bag-of-Words vector
word_to_ix = {}
for sent, _ in data + test_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
print(word_to_ix)
# In[29]:
VOCAB_SIZE = len(word_to_ix)
NUM_LABELS = 2
# ### Custom Classifier
# Our classifier inherits from the `nn.Module` class, which provides an interface to important methods for constructing and working with our models.
#
# Here, we will implement a custom multi-layer feed-forward neural network. In our example, we calculate:
# $$
# y = \mathrm{final}(\mathrm{nonlinear}(\mathrm{linear}(\mathrm{BoW})))
# $$
# where nonlinear denotes a non-linear function (we use [`nn.ReLU`](https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html)), and the first and final layers are both linear (affine) maps implemented with `nn.Linear`. In practice, we create an `nn.Module` subclass containing the definition of our model architecture in the `__init__` function. The `forward` function defines how tensors are processed by our model.
# In[30]:
class BoWClassifier(nn.Module):

    def __init__(self, num_labels, vocab_size, num_hidden=2):
        # Calls the init function of nn.Module.
        super(BoWClassifier, self).__init__()
        # Define the parameters that you need:
        # first linear layer
        self.linear = nn.Linear(vocab_size, num_hidden)
        # non-linearity (here it is also a layer!)
        self.nonlinear = nn.ReLU()
        # final affine transformation
        self.final = nn.Linear(num_hidden, num_labels)

    def forward(self, bow_vec):
        # Pass the input through the layers defined above.
        # The output is a vector of unnormalised scores (logits), one per label;
        # the softmax is applied later, inside the loss function.
        return self.final(self.nonlinear(self.linear(bow_vec)))
# In[31]:
# Functions to create BoW vectors and label targets
def make_bow_vector(sentence, word_to_ix):
    # Count the occurrences of each word in the sentence
    vec = torch.zeros(len(word_to_ix))
    for word in sentence:
        vec[word_to_ix[word]] += 1
    return vec.view(1, -1)

def make_target(label, label_to_ix):
    return torch.LongTensor([label_to_ix[label]])
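# A quick check of what these helpers produce for the first training example:

# In[ ]:

example_sent, example_label = data[0]
print(make_bow_vector(example_sent, word_to_ix))  # a 1 x VOCAB_SIZE vector of word counts
print(make_target(example_label, label_to_ix))    # tensor([0]), the index of SPANISH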
# The `BoWClassifier` (or any other module you will create) stores the model's parameters.
# In[32]:
model = BoWClassifier(NUM_LABELS, VOCAB_SIZE)

# Each nn.Linear layer contributes a weight matrix A and a bias vector b
# (computing Ax + b), so four parameter tensors are printed below.
for param in model.parameters():
    print(param)
# We run the model on the test data before training, to compare with the results from the trained model.
# In[33]:
with torch.no_grad():
    for text, label in test_data:
        bow_vec = make_bow_vector(text, word_to_ix)
        logits = model(bow_vec)  # unnormalised scores, one per label
        print(logits)

# Print the matrix column corresponding to "creo"
print(next(model.parameters())[:, word_to_ix["creo"]])
# ### Training
# We set our loss function to cross entropy, which combines `nn.LogSoftmax()` and `nn.NLLLoss()` (negative log likelihood), as the short check below illustrates; we compute gradients with backpropagation and update the parameters with stochastic gradient descent.
#
# Usually we want to pass over the training data several times by setting a suitable number of epochs. Since we have a tiny dataset, we will use an exaggerated number of epochs.
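# A quick numerical check of this equivalence (a sketch on a random score vector, using `torch.nn.functional`, imported above as `F`):

# In[ ]:

scores = torch.randn(1, NUM_LABELS)  # a fake batch of unnormalised scores
target = torch.LongTensor([0])
print(F.cross_entropy(scores, target))                   # cross entropy directly
print(F.nll_loss(F.log_softmax(scores, dim=1), target))  # log-softmax + NLL: same value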
# In[34]:
loss_function = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
# In[35]:
for epoch in range(200):
    for instance, label in data:
        # Step 1. Remember that PyTorch accumulates gradients.
        # We need to clear them out before each instance.
        model.zero_grad()

        # Step 2. Make our BoW vector and wrap the target in a
        # Tensor as an integer. For example, if the target is SPANISH,
        # we wrap the integer 0. The loss function then knows that the 0th
        # element of the scores corresponds to SPANISH.
        bow_vec = make_bow_vector(instance, word_to_ix)
        target = make_target(label, label_to_ix)

        # Step 3. Run our forward pass.
        logits = model(bow_vec)

        # Step 4. Compute the loss and gradients, and update the parameters
        # by calling optimizer.step().
        loss = loss_function(logits, target)
        loss.backward()
        optimizer.step()
# ### Evaluation
# Let's see if our model can now predict more accurately whether a sentence is written in English or Spanish!
#
# Indeed, the score for Spanish is much higher for the first sentence, while the score for English is much higher for the second sentence in the test data!
# In[36]:
with torch.no_grad():
    for instance, label in test_data:
        bow_vec = make_bow_vector(instance, word_to_ix)
        logits = model(bow_vec)
        print(logits)
# In[37]:
print(next(model.parameters())[:, word_to_ix["creo"]])
# ### Useful links
# - PyTorch Tutorials:
# https://pytorch.org/tutorials/index.html
# - Introduction to Pytorch notebook from Stanford: https://nbviewer.jupyter.org/github/cgpotts/cs224u/blob/master/tutorial_pytorch.ipynb
# ----
# ## Course Project
# * **Group project**, can be completed in groups of up to 3 students
# * Released 1 September, hand-in 5 November 17:00
#   * joint report; the contribution of each student should be stated clearly
#   * code as attachment
#   * individual mark for each group member, based on the quality and quantity of their contributions
#   * submission via Digital Exam
# * Assignment consists of several parts tied to weekly lecture topics
# * Finding a group:
#   * deadline for group forming: **13 September 17:00**