Source code for optuna.visualization._optimization_history
from __future__ import annotations

from collections.abc import Callable
from collections.abc import Sequence
from enum import Enum
import math
from typing import cast
from typing import NamedTuple

import numpy as np

from optuna.logging import get_logger
from optuna.samplers._base import _CONSTRAINTS_KEY
from optuna.study import Study
from optuna.study._study_direction import StudyDirection
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
from optuna.visualization._plotly_imports import _imports
from optuna.visualization._utils import _check_plot_args


if _imports.is_successful():
    from optuna.visualization._plotly_imports import go

_logger = get_logger(__name__)


class _ValueState(Enum):
    Feasible = 0
    Infeasible = 1
    Incomplete = 2


class _ValuesInfo(NamedTuple):
    values: list[float]
    stds: list[float] | None
    label_name: str
    states: list[_ValueState]


class _OptimizationHistoryInfo(NamedTuple):
    trial_numbers: list[int]
    values_info: _ValuesInfo
    best_values_info: _ValuesInfo | None


def _get_optimization_history_info_list(
    study: Study | Sequence[Study],
    target: Callable[[FrozenTrial], float] | None,
    target_name: str,
    error_bar: bool,
) -> list[_OptimizationHistoryInfo]:
    _check_plot_args(study, target, target_name)
    if isinstance(study, Study):
        studies = [study]
    else:
        studies = list(study)

    info_list: list[_OptimizationHistoryInfo] = []
    for study in studies:
        trials = study.get_trials()
        label_name = target_name if len(studies) == 1 else f"{target_name} of {study.study_name}"
        values = []
        value_states = []
        for trial in trials:
            if trial.state != TrialState.COMPLETE:
                values.append(float("nan"))
                value_states.append(_ValueState.Incomplete)
                continue
            # A completed trial is feasible only if all of its constraint values are non-positive.
            constraints = trial.system_attrs.get(_CONSTRAINTS_KEY)
            if constraints is None or all([x <= 0.0 for x in constraints]):
                value_states.append(_ValueState.Feasible)
            else:
                value_states.append(_ValueState.Infeasible)
            if target is not None:
                values.append(target(trial))
            else:
                values.append(cast(float, trial.value))
        if target is not None:
            # We don't calculate best for user-defined target function since we cannot tell
            # which direction is better.
            best_values_info: _ValuesInfo | None = None
        else:
            feasible_best_values = []
            if study.direction == StudyDirection.MINIMIZE:
                feasible_best_values = [
                    v if s == _ValueState.Feasible else float("inf")
                    for v, s in zip(values, value_states)
                ]
                best_values = list(np.minimum.accumulate(feasible_best_values))
            else:
                feasible_best_values = [
                    v if s == _ValueState.Feasible else -float("inf")
                    for v, s in zip(values, value_states)
                ]
                best_values = list(np.maximum.accumulate(feasible_best_values))
            best_label_name = (
                "Best Value" if len(studies) == 1 else f"Best Value of {study.study_name}"
            )
            best_values_info = _ValuesInfo(best_values, None, best_label_name, value_states)
        info_list.append(
            _OptimizationHistoryInfo(
                trial_numbers=[t.number for t in trials],
                values_info=_ValuesInfo(values, None, label_name, value_states),
                best_values_info=best_values_info,
            )
        )

    if len(info_list) == 0:
        _logger.warning("There are no studies.")

    feasible_trial_count = sum(
        info.values_info.states.count(_ValueState.Feasible) for info in info_list
    )
    infeasible_trial_count = sum(
        info.values_info.states.count(_ValueState.Infeasible) for info in info_list
    )
    if feasible_trial_count + infeasible_trial_count == 0:
        _logger.warning("There are no complete trials.")
        info_list.clear()

    if not error_bar:
        return info_list

    # When error_bar=True, a list of 0 or 1 element is returned.
    if len(info_list) == 0:
        return []
    if feasible_trial_count == 0:
        _logger.warning("There are no feasible trials.")
        return []

    all_trial_numbers = [number for info in info_list for number in info.trial_numbers]
    max_num_trial = max(all_trial_numbers) + 1

    def _aggregate(label_name: str, use_best_value: bool) -> tuple[list[int], _ValuesInfo]:
        # Calculate mean and std of values for each trial number.
        values: list[list[float]] = [[] for _ in range(max_num_trial)]
        states: list[list[_ValueState]] = [[] for _ in range(max_num_trial)]
        assert info_list is not None
        for trial_numbers, values_info, best_values_info in info_list:
            if use_best_value:
                assert best_values_info is not None
                values_info = best_values_info
            for n, v, s in zip(trial_numbers, values_info.values, values_info.states):
                if not math.isinf(v):
                    if not use_best_value and s == _ValueState.Feasible:
                        values[n].append(v)
                    elif use_best_value:
                        values[n].append(v)
                states[n].append(s)
        trial_numbers_union: list[int] = []
        value_states: list[_ValueState] = []
        value_means: list[float] = []
        value_stds: list[float] = []
        for i in range(max_num_trial):
            if len(states[i]) > 0 and _ValueState.Feasible in states[i]:
                value_states.append(_ValueState.Feasible)
                trial_numbers_union.append(i)
                value_means.append(np.mean(values[i]).item())
                value_stds.append(np.std(values[i]).item())
            else:
                value_states.append(_ValueState.Infeasible)
        return trial_numbers_union, _ValuesInfo(value_means, value_stds, label_name, value_states)

    eb_trial_numbers, eb_values_info = _aggregate(target_name, False)
    eb_best_values_info: _ValuesInfo | None = None
    if target is None:
        _, eb_best_values_info = _aggregate("Best Value", True)
    return [_OptimizationHistoryInfo(eb_trial_numbers, eb_values_info, eb_best_values_info)]
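The helper above returns one _OptimizationHistoryInfo per study, or a single aggregated entry when error_bar=True. A minimal sketch of the returned structure for a toy single-objective study, assuming optuna and plotly are installed; the objective and trial count are illustrative:

import optuna

def _toy_objective(trial):
    x = trial.suggest_float("x", -10, 10)
    return x**2

_study = optuna.create_study(direction="minimize")
_study.optimize(_toy_objective, n_trials=5)

_infos = _get_optimization_history_info_list(
    _study, target=None, target_name="Objective Value", error_bar=False
)
_info = _infos[0]
print(_info.trial_numbers)            # [0, 1, 2, 3, 4]
print(_info.values_info.values)       # raw objective values, one per trial
print(_info.best_values_info.values)  # running best (minimum, since direction is MINIMIZE)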
def plot_optimization_history(
    study: Study | Sequence[Study],
    *,
    target: Callable[[FrozenTrial], float] | None = None,
    target_name: str = "Objective Value",
    error_bar: bool = False,
) -> "go.Figure":
    """Plot optimization history of all trials in a study.

    Args:
        study:
            A :class:`~optuna.study.Study` object whose trials are plotted for their target
            values. You can pass multiple studies if you want to compare those optimization
            histories.
        target:
            A function to specify the value to display. If it is :obj:`None` and ``study`` is
            being used for single-objective optimization, the objective values are plotted.

            .. note::
                Specify this argument if ``study`` is being used for multi-objective
                optimization.
        target_name:
            Target's name to display on the axis label and the legend.
        error_bar:
            A flag to show the error bar.

    Returns:
        A :class:`plotly.graph_objects.Figure` object.
    """
    _imports.check()
    info_list = _get_optimization_history_info_list(study, target, target_name, error_bar)
    return _get_optimization_history_plot(info_list, target_name)
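For reference, a minimal usage sketch of this public entry point, assuming optuna and plotly are installed; the objective and trial count below are illustrative:

import optuna

def objective(trial):
    x = trial.suggest_float("x", -10, 10)
    return (x - 2) ** 2

study = optuna.create_study()
study.optimize(objective, n_trials=20)

fig = optuna.visualization.plot_optimization_history(study)
fig.show()  # opens the interactive Plotly figure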
def _get_optimization_history_plot(
    info_list: list[_OptimizationHistoryInfo],
    target_name: str,
) -> "go.Figure":
    layout = go.Layout(
        title="Optimization History Plot",
        xaxis={"title": "Trial"},
        yaxis={"title": target_name},
    )

    traces = []
    for trial_numbers, values_info, best_values_info in info_list:
        infeasible_trial_numbers = [
            n for n, s in zip(trial_numbers, values_info.states) if s == _ValueState.Infeasible
        ]
        if values_info.stds is None:
            error_y = None
            feasible_trial_numbers = [
                num
                for num, s in zip(trial_numbers, values_info.states)
                if s == _ValueState.Feasible
            ]
            feasible_trial_values = []
            for num in feasible_trial_numbers:
                feasible_trial_values.append(values_info.values[num])
            infeasible_trial_values = []
            for num in infeasible_trial_numbers:
                infeasible_trial_values.append(values_info.values[num])
        else:
            if (
                _ValueState.Infeasible in values_info.states
                or _ValueState.Incomplete in values_info.states
            ):
                _logger.warning(
                    "Your study contains infeasible trials. "
                    "In optimization history plot, "
                    "error bars are calculated for only feasible trial values."
                )
            error_y = {"type": "data", "array": values_info.stds, "visible": True}
            feasible_trial_numbers = trial_numbers
            feasible_trial_values = values_info.values
            infeasible_trial_values = []
        traces.append(
            go.Scatter(
                x=feasible_trial_numbers,
                y=feasible_trial_values,
                error_y=error_y,
                mode="markers",
                name=values_info.label_name,
            )
        )
        if best_values_info is not None:
            traces.append(
                go.Scatter(
                    x=trial_numbers,
                    y=best_values_info.values,
                    name=best_values_info.label_name,
                    mode="lines",
                )
            )
            if best_values_info.stds is not None:
                # Shade the band between best value +/- std when error bars are requested.
                upper = np.array(best_values_info.values) + np.array(best_values_info.stds)
                traces.append(
                    go.Scatter(
                        x=trial_numbers,
                        y=upper,
                        mode="lines",
                        line=dict(width=0.01),
                        showlegend=False,
                    )
                )
                lower = np.array(best_values_info.values) - np.array(best_values_info.stds)
                traces.append(
                    go.Scatter(
                        x=trial_numbers,
                        y=lower,
                        mode="none",
                        showlegend=False,
                        fill="tonexty",
                        fillcolor="rgba(255,0,0,0.2)",
                    )
                )
        # Infeasible trials are drawn as gray markers without a legend entry.
        traces.append(
            go.Scatter(
                x=infeasible_trial_numbers,
                y=infeasible_trial_values,
                error_y=error_y,
                mode="markers",
                name="Infeasible Trial",
                marker={"color": "#cccccc"},
                showlegend=False,
            )
        )
    return go.Figure(data=traces, layout=layout)
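When the same search is repeated across several studies, passing them together with error_bar=True exercises the aggregation and band-drawing code above: objective values are averaged per trial number, the markers get std error bars, and the best-value line is wrapped in a shaded std band. A minimal sketch, assuming optuna and plotly are installed; the study names and trial counts are made up:

import optuna

def objective(trial):
    x = trial.suggest_float("x", -10, 10)
    return (x - 2) ** 2

studies = []
for i in range(3):
    s = optuna.create_study(study_name=f"run-{i}", direction="minimize")
    s.optimize(objective, n_trials=20)
    studies.append(s)

fig = optuna.visualization.plot_optimization_history(studies, error_bar=True)
fig.show()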