pull
Returns the last printed scoring grid. Use the pull function after any training function to store the scoring grid as a pandas.DataFrame.
Example
# loading dataset
from pycaret.datasets import get_data
data = get_data('diabetes')

# init setup
from pycaret.classification import *
clf1 = setup(data, target = 'Class variable')

# compare models
best_model = compare_models()

# get the scoring grid
results = pull()
type(results)
# >>> pandas.core.frame.DataFrame
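Because the pulled grid is a regular pandas.DataFrame, standard pandas methods apply; for example, it can be saved to disk (the filename below is just an illustration):

# save the scoring grid to a CSV file
results.to_csv('compare_models_results.csv')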
models
Returns a table containing all the models available in the model library of the imported module.
Example
# loading dataset
from pycaret.datasets import get_data
data = get_data('diabetes')

# init setup
from pycaret.classification import *
clf1 = setup(data, target = 'Class variable')

# check model library
models()
If you want to see a little more information than this, you can pass internal=True.
# loading dataset
from pycaret.datasets import get_data
data = get_data('diabetes')

# init setup
from pycaret.classification import *
clf1 = setup(data, target = 'Class variable')

# check model library
models(internal = True)
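The index of the returned table is the estimator ID, which can be passed to other training functions. A minimal sketch, assuming the setup above has already been run:

# 'lr' is the ID of Logistic Regression in the classification model library
lr = create_model('lr')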
get_config
This function retrieves the global variables created when initializing the setup function.
Example
# load dataset
from pycaret.datasets import get_data
data = get_data('diabetes')

# init setup
from pycaret.classification import *
clf1 = setup(data, target = 'Class variable')

# get X_train
get_config('X_train')
To check all the parameters accessible with get_config:

# check all available parameters
get_config()
When called without an argument, get_config returns the names of all variables and properties accessible through it.
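The retrieved objects are ordinary pandas objects and can be inspected directly. A minimal sketch, assuming the setup above and that the 'y_train' key is available in your PyCaret version:

# inspect the transformed training data
X_train = get_config('X_train')
y_train = get_config('y_train')
print(X_train.shape, y_train.shape)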
set_config
This function resets the global variables.
Example
# load dataset
from pycaret.datasets import get_data
data = get_data('diabetes')

# init setup
from pycaret.classification import *
clf1 = setup(data, target = 'Class variable', session_id = 123)

# reset environment seed
set_config('seed', 999)
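The change can be verified with get_config:

# confirm the updated seed
get_config('seed')
# >>> 999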
get_metrics
Returns the table of all the available metrics in the metric container. All these metrics are used for cross-validation.
# load dataset
from pycaret.datasets import get_data
data = get_data('diabetes')

# init setup
from pycaret.classification import *
clf1 = setup(data, target = 'Class variable', session_id = 123)

# get metrics
get_metrics()
add_metric
Adds a custom metric to the metric container.
# load dataset
from pycaret.datasets import get_data
data = get_data('diabetes')

# init setup
from pycaret.classification import *
clf1 = setup(data, target = 'Class variable', session_id = 123)

# add metric
from sklearn.metrics import log_loss
add_metric('logloss', 'Log Loss', log_loss, greater_is_better = False)
Now if you check the metric container with get_metrics:
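# check metric container
get_metrics()

The custom 'logloss' metric now appears in the table.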
remove_metric
Removes a metric from the metric container.
# remove metric
remove_metric('logloss')
This function produces no output. Let's check the metric container again with get_metrics:
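# check metric container
get_metrics()

The 'logloss' metric is no longer listed.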
automl
This function returns the best model out of all trained models in the current setup based on the optimize parameter. Metrics evaluated can be accessed using the get_metrics function.
Example
# load dataset
from pycaret.datasets import get_data
data = get_data('diabetes')

# init setup
from pycaret.classification import *
clf1 = setup(data, target = 'Class variable')

# compare models
top5 = compare_models(n_select = 5)

# tune models
tuned_top5 = [tune_model(i) for i in top5]

# ensemble models
bagged_top5 = [ensemble_model(i) for i in tuned_top5]

# blend models
blender = blend_models(estimator_list = top5)

# stack models
stacker = stack_models(estimator_list = top5)

# automl
best = automl(optimize = 'Recall')
print(best)
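The model returned by automl behaves like any other trained model; for example, it can be scored on the hold-out set with predict_model:

# generate predictions on the hold-out set
holdout_pred = predict_model(best)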
get_logs
Returns a table of experiment logs. It only works when log_experiment = True is set when initializing the setup function.
Example
# load dataset
from pycaret.datasets import get_data
data = get_data('diabetes')

# init setup
from pycaret.classification import *
clf1 = setup(data, target = 'Class variable', log_experiment = True, experiment_name = 'diabetes1')

# compare models
top5 = compare_models()

# check ML logs
get_logs()
get_current_experiment
Obtains the current experiment object and returns it. This is useful when you are using the functional API and want to move to the OOP API.
# loading dataset
from pycaret.datasets import get_data
data = get_data('insurance')

# init setup using functional API
from pycaret.regression import *
s = setup(data, target = 'charges', session_id = 123)

# compare models
best = compare_models()

# return the experiment object for the current functional experiment
reg1 = get_current_experiment()
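The returned experiment object exposes the same functions as methods. A minimal sketch, continuing from the example above:

# score the hold-out set through the OOP API
reg1.predict_model(best)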
set_current_experiment
Set the current experiment created using the OOP API to be used with the functional API.
# loading dataset
from pycaret.datasets import get_data
data = get_data('insurance')

# init setup using OOP API
from pycaret.regression import RegressionExperiment, set_current_experiment
reg1 = RegressionExperiment()
reg1.setup(data, target = 'charges', session_id = 123)

# compare models using the OOP API
best = reg1.compare_models()

# set the OOP experiment as the current functional experiment
set_current_experiment(reg1)
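After this call, functional-API functions operate on reg1. A minimal sketch:

# functional calls now read from reg1, e.g. retrieving its training data
from pycaret.regression import get_config
get_config('X_train')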