from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
example_sent = """This is a sample sentence,
showing off the stop words filtration."""
# Download stopwords and punkt if not already downloaded
import nltk
nltk.download('stopwords')
nltk.download('punkt')
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(example_sent)
# Convert the words in word_tokens to lowercase and then check whether
# they are present in stop_words or not
filtered_sentence = [w for w in word_tokens if w.lower() not in stop_words]
# Without lowercase conversion
filtered_sentence_no_lower = [w for w in word_tokens if w not in stop_words]
print("Original Sentence:", example_sent)
print("Word Tokens:", word_tokens)
print("Filtered Sentence (with lowercase conversion):",
filtered_sentence)
print("Filtered Sentence (without lowercase conversion):",
filtered_sentence_no_lower)
Output:
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data]   Package stopwords is already up-to-date!
[nltk_data] Downloading package punkt to /root/nltk_data...
[nltk_data]   Unzipping tokenizers/punkt.zip.
Original Sentence: This is a sample sentence, showing off the stop words filtration.
Word Tokens: ['This', 'is', 'a', 'sample', 'sentence', ',', 'showing', 'off', 'the', 'stop', 'words',
'filtration', '.']
Filtered Sentence (with lowercase conversion): ['sample', 'sentence', ',', 'showing', 'stop',
'words', 'filtration', '.']
Filtered Sentence (without lowercase conversion): ['This', 'sample', 'sentence', ',', 'showing',
'stop', 'words', 'filtration', '.']
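The same filtering pattern extends to custom stop word lists. A minimal sketch: the extra words below ("sample", "showing") are illustrative additions of our own, not part of NLTK's built-in list.
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# Extend the built-in English list with domain-specific words (illustrative choices)
custom_stop_words = set(stopwords.words('english')) | {"sample", "showing"}
tokens = word_tokenize("This is a sample sentence, showing off the stop words filtration.")
print([w for w in tokens if w.lower() not in custom_stop_words])
# expected: ['sentence', ',', 'stop', 'words', 'filtration', '.']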
# import these modules
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize

ps = PorterStemmer()

# choose some words to be stemmed
words = ["program", "programs", "programmer", "programming", "programmers"]

for w in words:
    print(w, ":", ps.stem(w))
Output:
program : program
programs : program
programmer : programm
programming : program
programmers : programm
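A closely related alternative is NLTK's SnowballStemmer ("Porter2"), a refinement of the Porter algorithm. A minimal sketch for comparison, reusing the same word list:
from nltk.stem import SnowballStemmer

# Snowball ("Porter2") is a drop-in alternative to PorterStemmer
snowball = SnowballStemmer("english")
for w in ["program", "programs", "programmer", "programming", "programmers"]:
    print(w, ":", snowball.stem(w))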
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
# Download necessary resources
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')  # Add this line to download the missing resource
stop_words = set(stopwords.words('english'))
# Rest of your code remains unchanged
txt = "Sukanya, Rajib and Naba are my good friends. "\
"Sukanya is getting married next year. "\
"Marriage is a big step in one’s life."\
"It is both exciting and frightening. "\
"But friendship is a sacred bond between people."\
"It is a special kind of love between us. "\
"Many of you must have tried searching for a friend "\
"but never found the right one."
tokenized = sent_tokenize(txt)
for i in tokenized:
    wordsList = nltk.word_tokenize(i)
    wordsList = [w.lower() for w in wordsList if w.lower() not in stop_words]
    tagged = nltk.pos_tag(wordsList)
    print(tagged)
Output:
[nltk_data] Downloading package stopwords to /root/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
[nltk_data] Downloading package punkt to /root/nltk_data...
[nltk_data] Package punkt is already up-to-date!
[nltk_data] Downloading package averaged_perceptron_tagger to
[nltk_data] /root/nltk_data...
[nltk_data] Unzipping taggers/averaged_perceptron_tagger.zip.
[('sukanya', 'NN'), (',', ','), ('rajib', 'VB'), ('naba', 'RB'), ('good', 'JJ'),
('friends', 'NNS'), ('.', '.')]
[('sukanya', 'NN'), ('getting', 'VBG'), ('married', 'VBN'), ('next', 'JJ'),
('year', 'NN'), ('.', '.')]
[('marriage', 'NN'), ('big', 'JJ'), ('step', 'NN'), ('one', 'CD'), ('’', 'NN'),
('life.it', 'NN'), ('exciting', 'VBG'), ('frightening', 'NN'), ('.', '.')]
[('friendship', 'NN'), ('sacred', 'VBD'), ('bond', 'NN'), ('people.it',
'NN'), ('special', 'JJ'), ('kind', 'NN'), ('love', 'VB'), ('us', 'PRP'), ('.',
'.')]
[('many', 'JJ'), ('must', 'MD'), ('tried', 'VB'), ('searching', 'VBG'),
('friend', 'NN'), ('never', 'RB'), ('found', 'VBD'), ('right', 'JJ'), ('one',
'CD'), ('.', '.')]
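The (word, tag) pairs above can be filtered by tag, for example to pull out nouns; Penn Treebank noun tags all start with 'NN'. A minimal sketch using one tagged sentence taken verbatim from the output above:
# Keep only the nouns from a tagged sentence (tags starting with 'NN')
tagged = [('sukanya', 'NN'), ('getting', 'VBG'), ('married', 'VBN'),
          ('next', 'JJ'), ('year', 'NN'), ('.', '.')]
nouns = [word for word, tag in tagged if tag.startswith('NN')]
print(nouns)  # expected: ['sukanya', 'year']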
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
n_samples = 200
blob_centers = ([1, 1], [3, 4], [1, 3.3], [3.5, 1.8])
data, labels = make_blobs(n_samples=n_samples, centers=blob_centers,
                          cluster_std=0.5, random_state=0)
colours = ('green', 'orange', 'blue', 'magenta')
fig, ax = plt.subplots()
for n_class in range(len(blob_centers)):
    ax.scatter(data[labels == n_class][:, 0],
               data[labels == n_class][:, 1],
               c=colours[n_class], s=30, label=str(n_class))
datasets = train_test_split(data, labels, test_size=0.2)
train_data, test_data, train_labels, test_labels = datasets
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
                    hidden_layer_sizes=(6,), random_state=1)
clf.fit(train_data, train_labels)
clf.score(train_data, train_labels)
predictions_train = clf.predict(train_data)
predictions_test = clf.predict(test_data)
train_score = accuracy_score(predictions_train, train_labels)
print("Score on train data:", train_score)
test_score = accuracy_score(predictions_test, test_labels)
print("Score on test data:", test_score)
predictions_train[:20]
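To visualise what the trained network has learned, the plane can be evaluated on a dense grid and coloured by predicted class. A minimal sketch, assuming the clf, data, and labels from the blob example above:
import numpy as np

# Predict a class for every point on a grid covering the training data
xx, yy = np.meshgrid(np.linspace(data[:, 0].min() - 1, data[:, 0].max() + 1, 200),
                     np.linspace(data[:, 1].min() - 1, data[:, 1].max() + 1, 200))
zz = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

fig, ax = plt.subplots()
ax.contourf(xx, yy, zz, alpha=0.3)                  # predicted regions
ax.scatter(data[:, 0], data[:, 1], c=labels, s=30)  # training points
plt.show()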
X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
y = [0, 0, 0, 1]
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
                    hidden_layer_sizes=(5, 2), random_state=1)
print(clf.fit(X, y))
print("Weights between input and first hidden layer:")
print(clf.coefs_[0])
print("\nWeights between first hidden and second hidden layer:")
print(clf.coefs_[1])
print("w0 =", clf.coefs_[0][0][0])
print("w1 =", clf.coefs_[0][1][0])
for i in range(len(clf.coefs_)):
    number_neurons_in_layer = clf.coefs_[i].shape[1]
    for j in range(number_neurons_in_layer):
        weights = clf.coefs_[i][:, j]
        print(i, j, weights, end=", ")
    print()
print("\nBias values for first hidden layer:")
print(clf.intercepts_[0])
print("\nBias values for second hidden layer:")
print(clf.intercepts_[1])
result = clf.predict([[0, 0], [0, 1], [1, 0], [0, 1], [1, 1],
                      [2., 2.], [1.3, 1.3], [2, 4.8]])
prob_results = clf.predict_proba([[0, 0], [0, 1], [1, 0], [0, 1], [1, 1],
                                  [2., 2.], [1.3, 1.3], [2, 4.8]])
print(prob_results)
Output:
Score on train data: 0.99375
Score on test data: 0.925
MLPClassifier(alpha=1e-05, hidden_layer_sizes=(5, 2), random_state=1, solver='lbfgs')
Weights between input and first hidden layer:
[[-0.14203691 -1.18304359 -0.85567518 -4.53250719 -0.60466275]
 [-0.69781111 -3.5850093  -0.26436018 -4.39161248  0.06644423]]

Weights between first hidden and second hidden layer:
[[ 0.29179638 -0.14155284]
 [ 4.02666592 -0.61556475]
 [-0.51677234  0.51479708]
 [ 7.37215202 -0.31936965]
 [ 0.32920668  0.64428109]]
w0 = -0.14203691267827168
w1 = -0.6978111149778693
0 0 [-0.14203691 -0.69781111], 0 1 [-1.18304359 -3.5850093 ], 0 2 [-0.85567518 -0.26436018], 0 3 [-4.53250719 -4.39161248], 0 4 [-0.60466275  0.06644423],
1 0 [ 0.29179638  4.02666592 -0.51677234  7.37215202  0.32920668], 1 1 [-0.14155284 -0.61556475  0.51479708 -0.31936965  0.64428109],
2 0 [-4.96774269 -0.86330397],

Bias values for first hidden layer:
[-0.14962269 -0.59232707 -0.5472481   7.02667699 -0.87510813]

Bias values for second hidden layer:
[-3.61417672 -0.76834882]
[[1.00000000e+000 5.25723951e-101]
 [1.00000000e+000 3.71534882e-031]
 [1.00000000e+000 6.47069178e-029]
 [1.00000000e+000 3.71534882e-031]
 [2.07145538e-004 9.99792854e-001]
 [2.07145538e-004 9.99792854e-001]
 [2.07145538e-004 9.99792854e-001]
 [2.07145538e-004 9.99792854e-001]]
# Install NLTK
!pip install nltk
import nltk
from nltk.chat.util import Chat, reflections
# Define pairs of patterns and responses for the chatbot
pairs = [
    [
        r"my name is (.*)",
        ["Hello %1, how can I help you today?",]
    ],
    [
        r"what is your name?",
        ["My name is ChatBot and I'm here to assist you.",]
    ],
    [
        r"how are you ?",
        ["I'm doing well, thank you!", "I am always here to help.",]
    ],
    [
        r"sorry (.*)",
        ["No problem, please tell me how can I assist you?",]
    ],
    [
        r"quit",
        ["Bye! Take care. :)", "Goodbye, have a great day!"]
    ],
]
# Create a chatbot with the defined pairs
chatbot = Chat(pairs, reflections)
# Start chatting
print("Hi! I'm ChatBot. How can I assist you today?")
chatbot.converse()
Output:
Requirement already satisfied: nltk in /usr/local/lib/python3.10/dist-packages (3.8.1)
Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from nltk) (8.1.7)
Requirement already satisfied: joblib in /usr/local/lib/python3.10/dist-packages (from nltk) (1.3.2)
Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.10/dist-packages (from nltk) (2023.12.25)
Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from nltk) (4.66.1)
Hi! I'm ChatBot. How can I assist you today?
>what is your name?
My name is ChatBot and I'm here to assist you.
>how are you?
I'm doing well, thank you!
>sorry
None
>sorry dear
No problem, please tell me how can I assist you?
>bye
None
>quit
Goodbye, have a great day!
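New behaviour is added by appending another [pattern, responses] pair before constructing the Chat object; %1 is replaced by the first regex group (run through reflections, so "my" becomes "your", and so on). The pattern below is a hypothetical example of ours, not part of the original bot:
# Hypothetical extra pair: respond to statements like "i like pizza"
pairs.append([
    r"i like (.*)",
    ["Why do you like %1?", "%1 sounds interesting!"]
])
chatbot = Chat(pairs, reflections)  # rebuild the bot with the extended pairs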
import random
# Function to generate a random number between 1 and 100
def generate_number():
    return random.randint(1, 100)
# Function to play the game
def play_game():
    number_to_guess = generate_number()
    attempts = 0
    print("Welcome to the Guessing Game!")
    print("I have chosen a number between 1 and 100. Try to guess it.")
    while True:
        guess = input("Enter your guess (or 'exit' to quit): ")
        if guess.lower() == "exit":
            print("The number was:", number_to_guess)
            print("Thanks for playing! Goodbye!")
            break
        try:
            guess = int(guess)
            attempts += 1
            if guess < number_to_guess:
                print("Too low! Try again.")
            elif guess > number_to_guess:
                print("Too high! Try again.")
            else:
                print(f"Congratulations! You guessed the number in {attempts} attempts.")
                break
        except ValueError:
            print("Invalid input. Please enter a number.")
# Start the game
play_game()
Output:
Welcome to the Guessing Game!
I have chosen a number between 1 and 100. Try to guess it.
Enter your guess (or 'exit' to quit): 35
Too low! Try again.
Enter your guess (or 'exit' to quit): 56
Too high! Try again.
Enter your guess (or 'exit' to quit): 33
Too low! Try again.
Enter your guess (or 'exit' to quit): 45
Too high! Try again.
Enter your guess (or 'exit' to quit): 40
Congratulations! You guessed the number in 5 attempts.
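The transcript above roughly halves the remaining interval with each guess, which is why a secret between 1 and 100 can always be found in at most seven guesses. A minimal sketch of that bisection strategy played automatically (the function name auto_play is ours, not part of the game above):
import random

def auto_play(low=1, high=100):
    """Guess by always probing the middle of the remaining interval."""
    secret = random.randint(low, high)
    attempts = 0
    while low <= high:
        guess = (low + high) // 2
        attempts += 1
        if guess < secret:
            low = guess + 1
        elif guess > secret:
            high = guess - 1
        else:
            break
    return attempts

print("Guessed in", auto_play(), "attempts")  # never more than 7 for the 1-100 range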