Machine Learning

NumPy

import numpy as np
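
A minimal usage sketch (added here, not in the original sheet): array creation and element-wise math.

a = np.array([1, 2, 3])
b = np.arange(3)           # array([0, 1, 2])
print(a + b, a.mean())     # element-wise sum and the mean of a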

pandas
import pandas as pd
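
A minimal usage sketch (added, not in the original): building a DataFrame and summarizing it.

df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
print(df.describe())   # summary statistics per column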

PyTorch
import torch
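
A minimal usage sketch (added): tensor creation and a matrix product.

t = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
print(t @ t.T)   # 2x2 matrix product of t with its transpose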

SciPy
import scipy
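
A minimal usage sketch (added; the original only shows the import, so the submodule is my choice): a correlation test with scipy.stats.

from scipy import stats
r, p = stats.pearsonr([1, 2, 3, 4], [2, 4, 5, 9])
print(r, p)   # Pearson correlation coefficient and its p-value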

Matplotlib / Seaborn
import matplotlib.pyplot as plt
import seaborn as sns
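
A minimal usage sketch (added): a scatter plot of one of seaborn's built-in example datasets.

tips = sns.load_dataset("tips")                        # built-in example dataset
sns.scatterplot(x="total_bill", y="tip", data=tips)    # tip vs. total bill
plt.title("Tip vs. total bill")
plt.show()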

scikit-learn
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
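
A minimal usage sketch (added; the data here is synthetic): split, fit, and score a linear regression.

import numpy as np
X = np.arange(10).reshape(-1, 1)
y = 3 * X.ravel() + 2                                   # a perfectly linear target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
model = LinearRegression().fit(X_train, y_train)
print(model.score(X_test, y_test))                      # R² on the held-out data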

TensorFlow
import tensorflow as tf
from tensorflow import keras
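
A minimal usage sketch (added): defining and compiling a small Keras classifier.

model = keras.Sequential([
    keras.Input(shape=(4,)),                       # 4 input features
    keras.layers.Dense(16, activation="relu"),
    keras.layers.Dense(3, activation="softmax"),   # 3 output classes
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.summary()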

Matrix subtraction
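
The original snippet for this item did not survive extraction; a minimal NumPy sketch of element-wise matrix subtraction (my reconstruction, not the author's code):

import numpy as np

A = np.array([[5, 7], [9, 11]])
B = np.array([[1, 2], [3, 4]])
print(A - B)   # element-wise difference: [[4 5] [6 7]]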

Regression
# Import libraries
import seaborn as sns
import pandas as pd
from sklearn.linear_model import LinearRegression
import numpy as np
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt

# Load the data
df = pd.read_excel("datos_producción.xlsx")
df.head(2)

# Select the variables
variable_x = "Horas Trabajadas"
variable_y = "Productos Terminados"

# Fit the model
modelo = LinearRegression()
modelo.fit(df[[variable_x]], df[variable_y])
print('Equation of the line: y =', round(modelo.coef_[0], 3), 'x +', round(modelo.intercept_, 3))  # equation of the fitted line
print('Correlation coefficient:', round(np.corrcoef(df[variable_x], df[variable_y])[0, 1], 3))  # correlation coefficient
print('Coefficient of determination:', round(r2_score(df[variable_y], modelo.predict(df[[variable_x]])), 3))  # R²

# Plot with confidence interval
sns.regplot(x=df[variable_x], y=df[variable_y], ci=95, line_kws={"color": "red"}, scatter_kws={"color": "black"})
plt.title('Simple linear regression with 95% confidence interval')
plt.show()

# Make a PREDICTION
dato_predictor = 15

horas_trabajadas_nuevas = pd.DataFrame([dato_predictor], columns=[variable_x])  # convert to DataFrame
autos_producidos_prediccion = modelo.predict(horas_trabajadas_nuevas)
print('The predicted number of cars produced for', horas_trabajadas_nuevas.iloc[0][0], 'hours worked is', round(autos_producidos_prediccion[0], 3))

K-means
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans

# Load the dataset
df = sns.load_dataset("car_crashes")

# Inspect the first rows
print(df.head())

# Initial scatter plot with matplotlib
plt.figure(figsize=(8, 6))
plt.scatter(df["ins_losses"], df["total"], alpha=0.7)
plt.xlabel("Insurance losses (ins_losses)")
plt.ylabel("Total crashes (total)")
plt.title("Relationship between insurance losses and crashes")
plt.show()

# Transform the data with MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 100))
explicativas = scaler.fit_transform(df[["ins_losses", "total"]])
print(explicativas)

# Fit the KMeans model
model = KMeans(n_clusters=3, random_state=42)  # fixed seed for reproducibility
model.fit(explicativas)
pred = model.predict(explicativas)

# Visualization with seaborn
plt.figure(figsize=(8, 6))
sns.scatterplot(
    x=df["ins_losses"],
    y=df["total"],
    hue=pred,
    palette="Set1",
    legend="full",
)
plt.xlabel("Insurance losses (ins_losses)")
plt.ylabel("Total crashes (total)")
plt.title("Clustering of crashes and insurance losses")
plt.legend(title="Cluster")
plt.show()

Decision tree
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import numpy as np
from sklearn import tree
import matplotlib.pyplot as plt
from IPython.display import Image
from pydotplus import graph_from_dot_data
from sklearn.tree import export_graphviz


# Load the iris dataset as an array
iris = load_iris()

# Convert to a dataframe for visualization and set column names
iris_df = pd.DataFrame(
    data=iris.data,
    columns=['Sepal length (cm)', 'Sepal width (cm)', 'Petal length (cm)', 'Petal width (cm)']
)

# Display the first 5 rows of the dataframe
iris_df.head()

X = iris.data[:, 2:]
y = iris.target

# Display X and y
iris_df_sliced = iris_df.iloc[:, 2:]
pd.DataFrame(
    data=np.column_stack([iris_df_sliced, y]),
    columns=['Petal length (cm)', 'Petal width (cm)', 'Result']
)

# Now we train a decision tree classifier
tree_clf = DecisionTreeClassifier(max_depth=3)
tree_clf.fit(X, y)

# Make a prediction with the trained model
pred = tree_clf.predict([[2.85, 1.2]])

# Print the numeric prediction
print(f"Numeric prediction: {pred}")

# Assign class names
class_names = ['Setosa', 'Versicolor', 'Virginica']

# Print the name of the predicted species
print(f"The predicted species is: {class_names[pred[0]]}")

tree.plot_tree(tree_clf)

plt.show()

dot_data = export_graphviz(tree_clf,
                           filled=True,
                           rounded=True,
                           class_names=['Setosa',
                                        'Versicolor',
                                        'Virginica'],
                           feature_names=['Petal length',
                                          'Petal width'],
                           out_file=None)
graph = graph_from_dot_data(dot_data)
graph.write_png('tree.png')

Image(filename="tree.png", width=600)

Neural network
# Import the required libraries
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report, confusion_matrix

# Load the 'digits' dataset
digits = load_digits()
X = digits.data
y = digits.target

# Split the data into training (70%) and test (30%) sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Scale the data to improve the neural network's performance
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Create and initialize the MLP neural network
mlp = MLPClassifier(hidden_layer_sizes=(64, 32), max_iter=300,
                    activation='relu', solver='adam', random_state=42)

# Train the neural network on the training data
mlp.fit(X_train, y_train)

# Predict on the test data
y_pred = mlp.predict(X_test)

# Evaluate the model
print("Confusion matrix:")
print(confusion_matrix(y_test, y_pred))

print("\nClassification report:")
print(classification_report(y_test, y_pred))
