AI Lab

The document contains multiple sets of Python code examples demonstrating various programming concepts, including checking for palindromes, calculating measures of central tendency, and implementing machine learning algorithms. Each set includes functions for tasks such as checking voting eligibility, finding Armstrong numbers, and performing linear regression. The examples cover a range of topics from basic input/output operations to more complex data analysis and visualization techniques.

SET-1

1)A)
a = input("enter a: ")
b = a[::-1]
if a == b:
    print("It is a palindrome")
else:
    print("Not a palindrome")

Output:
enter a: RACECAR
It is a palindrome
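
A small variation, not part of the original exercise, that ignores letter case so mixed-case input such as "Racecar" is also accepted:

a = input("enter a: ").lower()   # normalise case before comparing
if a == a[::-1]:
    print("It is a palindrome")
else:
    print("Not a palindrome")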

B)

def remove_duplicates(input_list):
    # set() drops duplicates, but does not preserve the original order
    return list(set(input_list))

my_list = [1, 2, 2, 3, 4, 4, 5]
new_list = remove_duplicates(my_list)
print("List after removing duplicates:", new_list)

2)
With formula

from collections import Counter

data = [1, 2, 2, 3, 4, 4, 4, 5, 6]

# Mean
mean = sum(data) / len(data)

# Median
sorted_data = sorted(data)
n = len(data)
if n % 2 == 0:
    median = (sorted_data[n // 2 - 1] + sorted_data[n // 2]) / 2
else:
    median = sorted_data[n // 2]

# Mode
data_counts = Counter(data)
mode = data_counts.most_common(1)[0][0]

print(f"Data: {data}")
print("\nMeasures of Central Tendency:")
print(f"Mean: {mean}")
print(f"Median: {median}")
print(f"Mode: {mode}")

# Range, population variance and standard deviation
data_range = max(data) - min(data)
mean_diff_squared = []
for x in data:
    mean_diff_squared.append((x - mean) ** 2)
variance = sum(mean_diff_squared) / len(data)
std_deviation = variance ** 0.5

print("\nMeasures of Variability:")
print(f"Range: {data_range}")
print(f"Variance: {variance}")
print(f"Standard Deviation: {std_deviation}")

Without formula

import numpy as np
from scipy import stats

data = [1, 2, 2, 3, 4, 4, 4, 5, 6]

mean = np.mean(data)
median = np.median(data)
# keepdims=True keeps the array-style result so mode.mode[0] also works on recent SciPy versions
mode = stats.mode(data, keepdims=True)

print(f"Data: {data}")
print("\nMeasures of Central Tendency:")
print(f"Mean: {mean}")
print(f"Median: {median}")
print(f"Mode: {mode.mode[0]} (appeared {mode.count[0]} times)")

data_range = np.ptp(data)       # max - min
variance = np.var(data)         # population variance (ddof=0)
std_deviation = np.std(data)

print("\nMeasures of Variability:")
print(f"Range: {data_range}")
print(f"Variance: {variance}")
print(f"Standard Deviation: {std_deviation}")

SET-2
1)A)
def check_voting_eligibility():
    age = int(input("Enter your age: "))
    citizenship = input("Are you a citizen of the country? (yes/no): ").strip().lower()
    if age >= 18 and citizenship == "yes":
        print("You are eligible to vote.")
    else:
        print("You are not eligible to vote.")

check_voting_eligibility()

B)
squares = [x**2 for x in range(1, 11)]
print("List of squares from 1 to 10:", squares)

2)

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs

# Generate a 2-D dataset with 3 natural clusters
X, y = make_blobs(n_samples=500, n_features=2, centers=3, random_state=23)

plt.figure(0)
plt.grid(True)
plt.scatter(X[:, 0], X[:, 1])
plt.show()

k = 3
clusters = {}
np.random.seed(23)

# Initialise k cluster centres at random points in [-2, 2] x [-2, 2]
for idx in range(k):
    center = 2 * (2 * np.random.random((X.shape[1],)) - 1)
    clusters[idx] = {'center': center, 'points': []}

plt.scatter(X[:, 0], X[:, 1])
plt.grid(True)
for i in clusters:
    center = clusters[i]['center']
    plt.scatter(center[0], center[1], marker='*', c='red')
plt.show()

def distance(p1, p2):
    return np.sqrt(np.sum((p1 - p2) ** 2))

def assign_clusters(X, clusters):
    # Assign every point to its nearest cluster centre
    for i in range(k):
        clusters[i]['points'] = []
    for idx in range(X.shape[0]):
        dist = []
        curr_x = X[idx]
        for i in range(k):
            dis = distance(curr_x, clusters[i]['center'])
            dist.append(dis)
        curr_cluster = np.argmin(dist)
        clusters[curr_cluster]['points'].append(curr_x)
    return clusters

def update_clusters(clusters):
    # Move each centre to the mean of the points assigned to it
    for i in range(k):
        points = np.array(clusters[i]['points'])
        if points.shape[0] > 0:
            new_center = points.mean(axis=0)
            clusters[i]['center'] = new_center
            clusters[i]['points'] = []
    return clusters

def pred_cluster(X, clusters):
    # Label every point with the index of its nearest centre
    pred = []
    for i in range(X.shape[0]):
        dist = []
        for j in range(k):
            dist.append(distance(X[i], clusters[j]['center']))
        pred.append(np.argmin(dist))
    return pred

# One assignment/update iteration (repeat these two calls until the centres stop moving for full K-means)
clusters = assign_clusters(X, clusters)
clusters = update_clusters(clusters)
pred = pred_cluster(X, clusters)

plt.scatter(X[:, 0], X[:, 1], c=pred)
for i in clusters:
    center = clusters[i]['center']
    plt.scatter(center[0], center[1], marker='^', c='red')
plt.show()
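
For comparison, the same clustering can be cross-checked with scikit-learn's built-in KMeans (a library-based sketch, not part of the original exercise; it runs the full iterative algorithm rather than the single assignment/update step above):

from sklearn.cluster import KMeans

km = KMeans(n_clusters=3, random_state=23, n_init=10)
labels = km.fit_predict(X)

plt.scatter(X[:, 0], X[:, 1], c=labels)
plt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], marker='^', c='red')
plt.show()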

SET-3

1)

A)
def is_armstrong(number):
    num_str = str(number)
    num_digits = len(num_str)
    armstrong_sum = sum(int(digit) ** num_digits for digit in num_str)
    return armstrong_sum == number

num = int(input("Enter a number: "))
if is_armstrong(num):
    print(num, "is an Armstrong number.")
else:
    print(num, "is not an Armstrong number.")

B)
def find_max_in_list():
    user_input = input("Enter numbers separated by spaces: ")
    numbers = [int(num) for num in user_input.split()]
    max_number = max(numbers)
    print("The maximum number is:", max_number)

find_max_in_list()

2)

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, accuracy_score
iris = load_iris()
X = iris.data
Y = iris.target
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

k=3
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
print("Accuracy of the model:", accuracy_score(Y_test, Y_pred) * 100)

SET-4

1)A)
def check_divisibility():
    num = int(input("Enter an integer: "))
    if num % 3 == 0 and num % 5 == 0:
        print("The number is divisible by both 3 and 5.")
    elif num % 3 == 0:
        print("The number is divisible by 3.")
    elif num % 5 == 0:
        print("The number is divisible by 5.")
    else:
        print("The number is not divisible by 3 or 5.")

check_divisibility()

B)
def calculate_grade():
    score = int(input("Enter the student's score (0–100): "))
    if score < 0 or score > 100:
        print("Invalid score. Please enter a number between 0 and 100.")
    elif score >= 90:
        print("Grade: A")
    elif score >= 80:
        print("Grade: B")
    elif score >= 70:
        print("Grade: C")
    elif score >= 60:
        print("Grade: D")
    else:
        print("Grade: F")

calculate_grade()

2)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# Synthetic data: y is roughly 2x plus Gaussian noise
np.random.seed(0)
x = np.random.rand(10, 1) * 10
y = 2 * x.ravel() + np.random.randn(10) * 2

# Simple split: first 6 points for training, last 4 for testing
x_train, x_test = x[:6], x[6:]
y_train, y_test = y[:6], y[6:]

model = LinearRegression()
model.fit(x_train, y_train)

y_train_pred = model.predict(x_train)
y_test_pred = model.predict(x_test)

train_error = mean_squared_error(y_train, y_train_pred)
test_error = mean_squared_error(y_test, y_test_pred)

plt.figure(figsize=(10, 6))
plt.scatter(x_train, y_train, color='blue', label='Training Data')
plt.scatter(x_test, y_test, color='red', label='Testing Data')
plt.plot(x_train, y_train_pred, color='green', label='Linear Regression')
plt.title('Linear Regression Example')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()

print('Training Predictions:')
for pred in y_train_pred:
    print(f'{pred:.3f}')

print('\nTesting Predictions:')
for pred in y_test_pred:
    print(f'{pred:.3f}')

print(f'Training Error (MSE): {train_error:.3f}')
print(f'Testing Error (MSE): {test_error:.3f}')
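
The fitted slope and intercept can also be reported directly from the model (an optional addition; coef_ and intercept_ are standard LinearRegression attributes):

print(f'Fitted slope: {model.coef_[0]:.3f}')
print(f'Fitted intercept: {model.intercept_:.3f}')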

SET-5

1) A)
def check_leap_year():
    year = int(input("Enter a year: "))
    if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
        print(f"{year} is a leap year.")
    else:
        print(f"{year} is not a leap year.")

check_leap_year()

B)
def reverse_string():
    user_input = input("Enter a string: ")
    reversed_str = user_input[::-1]
    print("Reversed string:", reversed_str)

reverse_string()

2)
import matplotlib.pyplot as plt
from scipy import stats

x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y = [10, 20, 30, 40, 60, 70, 80, 90, 140, 160]

slope, intercept, r, p, std_err = stats.linregress(x, y)

def Bhagi(x):
    return slope * x + intercept

mymodel = list(map(Bhagi, x))

plt.scatter(x, y)
plt.plot(x, mymodel)
plt.show()
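
linregress also returns the correlation coefficient r, the p-value and the standard error, which can be printed to judge how well the line fits (a small optional addition):

print("slope:", slope)
print("intercept:", intercept)
print("correlation coefficient r:", r)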

SET-6

1)A)
def multiplication_table():
    num = int(input("Enter a number: "))
    print(f"\nMultiplication Table for {num}:")
    for i in range(1, 11):
        print(f"{num} x {i} = {num * i}")

multiplication_table()

B)
def count_numbers():
    numbers = list(map(int, input("Enter numbers separated by spaces: ").split()))
    positive_count = 0
    negative_count = 0
    zero_count = 0
    for num in numbers:
        if num > 0:
            positive_count += 1
        elif num < 0:
            negative_count += 1
        else:
            zero_count += 1
    print("\nResults:")
    print("Positive numbers:", positive_count)
    print("Negative numbers:", negative_count)
    print("Zeros:", zero_count)

count_numbers()
2)
import numpy as np
import matplotlib.pyplot as plt

def GD(x, y):
    # Gradient descent for a straight line y = m*x + c
    m = c = 0
    learning_rate = 0.01
    iterations = 10
    n = len(x)
    for _ in range(iterations):
        y_pred = m * x + c
        cost = (1/n) * sum((y - y_pred) ** 2)          # mean squared error
        m_gradient = -(2/n) * sum(x * (y - y_pred))    # d(cost)/dm
        c_gradient = -(2/n) * sum(y - y_pred)          # d(cost)/dc
        m -= learning_rate * m_gradient
        c -= learning_rate * c_gradient
        print('m:', m)
        print('c:', c)
        print('Cost:', cost)
        print()

X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
Y = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90])
GD(X, Y)
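
Since the data lie exactly on y = 10x, the descent should drive m towards 10 and c towards 0; a closed-form least-squares fit with np.polyfit gives a quick cross-check (an optional addition):

m_exact, c_exact = np.polyfit(X, Y, 1)    # degree-1 polynomial fit: [slope, intercept]
print('Closed-form slope:', m_exact)      # expected ~10.0
print('Closed-form intercept:', c_exact)  # expected ~0.0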

SET-7

1)
A)
def classify_triangle():
    a = float(input("Enter side 1: "))
    b = float(input("Enter side 2: "))
    c = float(input("Enter side 3: "))
    # Triangle inequality: every pair of sides must sum to more than the third
    if a + b > c and a + c > b and b + c > a:
        if a == b == c:
            print("The triangle is equilateral.")
        elif a == b or b == c or a == c:
            print("The triangle is isosceles.")
        else:
            print("The triangle is scalene.")
    else:
        print("The given sides do not form a valid triangle.")

classify_triangle()

B)
def count_vowels():
    text = input("Enter a string: ")
    vowels = "aeiouAEIOU"
    count = 0
    for char in text:
        if char in vowels:
            count += 1
    print("Number of vowels:", count)

count_vowels()

2)
import numpy
from sklearn import linear_model

X = numpy.array([3.78, 2.44, 2.09, 0.14, 1.72, 1.65, 4.92, 4.37, 4.96, 4.52, 3.69, 5.88]).reshape(-1, 1)
y = numpy.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])

logr = linear_model.LogisticRegression()
logr.fit(X, y)

predicted = logr.predict(numpy.array([3.46]).reshape(-1, 1))
print(predicted)
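
The predicted class probabilities can also be inspected with predict_proba (an optional addition; it returns one probability per class):

proba = logr.predict_proba(numpy.array([3.46]).reshape(-1, 1))
print(proba)   # [[P(class 0), P(class 1)]]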
