@@ -542,3 +542,67 @@ def test_index_offset():
     # Make sure translating between 1D and N-D indices is preserved
     assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
     assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
+
+
+def test_n_iter_without_progress():
+    # Make sure that the parameter n_iter_without_progress is used correctly
+    random_state = check_random_state(0)
+    X = random_state.randn(100, 2)
+    tsne = TSNE(n_iter_without_progress=2, verbose=2,
+                random_state=0, method='exact')
+
+    old_stdout = sys.stdout
+    sys.stdout = StringIO()
+    try:
+        tsne.fit_transform(X)
+    finally:
+        out = sys.stdout.getvalue()
+        sys.stdout.close()
+        sys.stdout = old_stdout
+
+    # The output needs to contain the value of n_iter_without_progress
+    assert ("did not make any progress during the "
+            "last 2 episodes. Finished." in out)
+
+
+def test_min_grad_norm():
+    # Make sure that the parameter min_grad_norm is used correctly
+    random_state = check_random_state(0)
+    X = random_state.randn(100, 2)
+    min_grad_norm = 0.002
+    tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
+                random_state=0, method='exact')
+
+    old_stdout = sys.stdout
+    sys.stdout = StringIO()
+    try:
+        tsne.fit_transform(X)
+    finally:
+        out = sys.stdout.getvalue()
+        sys.stdout.close()
+        sys.stdout = old_stdout
+
+    lines_out = out.split('\n')
+
+    # extract the gradient norm from the verbose output
+    gradient_norm_values = []
+    for line in lines_out:
+        # Once the optimization reports 'Finished', only an old gradient norm
+        # value is repeated, which we do not need to store
+        if 'Finished' in line:
+            break
+
+        start_grad_norm = line.find('gradient norm')
+        if start_grad_norm >= 0:
+            line = line[start_grad_norm:]
+            line = line.replace('gradient norm = ', '')
+            gradient_norm_values.append(float(line))
+
+    # Compute how often the gradient norm is smaller than min_grad_norm
+    gradient_norm_values = np.array(gradient_norm_values)
+    n_smaller_gradient_norms = \
+        len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
+
+    # The gradient norm can be smaller than min_grad_norm at most once,
+    # because the optimization stops as soon as it drops below that threshold
+    assert_less_equal(n_smaller_gradient_norms, 1)
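
Both new tests hinge on the early-stopping behaviour of the gradient descent that drives t-SNE: with verbose=2 the optimizer prints a "gradient norm = ..." value for each reported iteration, stops after n_iter_without_progress iterations without an improvement in the error, and stops as soon as the gradient norm reaches min_grad_norm. The code below is a rough, self-contained sketch of that control flow for illustration only; the function name gradient_descent_sketch, the plain update step, the per-iteration reporting, and any message wording beyond the substrings asserted above are assumptions, not scikit-learn's actual implementation.

import numpy as np


def gradient_descent_sketch(objective, p0, n_iter=1000,
                            n_iter_without_progress=30, min_grad_norm=1e-7,
                            learning_rate=0.01, verbose=0):
    # Illustrative sketch of the stopping rules exercised by the two tests
    # above; names and the plain gradient step are placeholders.
    p = p0.copy()
    best_error = np.inf
    best_iter = 0
    for i in range(n_iter):
        error, grad = objective(p)
        grad_norm = np.linalg.norm(grad)
        p -= learning_rate * grad

        if verbose >= 2:
            # One line per iteration; test_min_grad_norm parses the float
            # that follows 'gradient norm = '.
            print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
                  % (i + 1, error, grad_norm))

        if error < best_error:
            best_error, best_iter = error, i
        elif i - best_iter > n_iter_without_progress:
            if verbose >= 2:
                # test_n_iter_without_progress asserts on this message.
                print("[t-SNE] Iteration %d: did not make any progress "
                      "during the last %d episodes. Finished."
                      % (i + 1, n_iter_without_progress))
            break

        if grad_norm <= min_grad_norm:
            if verbose >= 2:
                # An already-reported norm is repeated here, which is why
                # test_min_grad_norm stops collecting values at the first
                # 'Finished' line.
                print("[t-SNE] Iteration %d: gradient norm %f. Finished."
                      % (i + 1, grad_norm))
            # Stopping in the same iteration in which the norm first reaches
            # the threshold means at most one reported value can be
            # <= min_grad_norm.
            break

    return p, best_error

Because the loop breaks in the iteration where the gradient norm first drops to min_grad_norm, at most one collected value can be at or below the threshold, which is exactly what assert_less_equal(n_smaller_gradient_norms, 1) verifies.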