sklearn_to_categorical: comparison of simple_model_fit.py @ 0:59e8b4328c82 (draft)

"planemo upload for repository https://github.com/bgruening/galaxytools/tree/master/tools/sklearn commit 208a8d348e7c7a182cfbe1b6f17868146428a7e2"

| author | bgruening |
|---|---|
| date | Tue, 13 Apr 2021 22:40:10 +0000 |
| parents | |
| children | f93f0cdbaf18 |
This comparison is against the null revision (-1:000000000000); the file is introduced in full at 0:59e8b4328c82.
```python
import argparse
import json
import pickle

import pandas as pd
from galaxy_ml.utils import load_model, read_columns
from scipy.io import mmread
from sklearn.pipeline import Pipeline

# number of parallel jobs, taken from the slots Galaxy allocates to the job
N_JOBS = int(__import__("os").environ.get("GALAXY_SLOTS", 1))


# TODO: import from galaxy_ml.utils in future versions
def clean_params(estimator, n_jobs=None):
    """Clean unwanted hyperparameter settings.

    If n_jobs is not None, set it on the estimator, where applicable.

    Returns
    -------
    Cleaned estimator object
    """
    ALLOWED_CALLBACKS = (
        "EarlyStopping",
        "TerminateOnNaN",
        "ReduceLROnPlateau",
        "CSVLogger",
        "None",
    )

    estimator_params = estimator.get_params()

    for name, p in estimator_params.items():
        # null any parameter that could trigger an unauthorized file write
        if name == "memory" or name.endswith("__memory") or name.endswith("_path"):
            new_p = {name: None}
            estimator.set_params(**new_p)
        elif n_jobs is not None and (name == "n_jobs" or name.endswith("__n_jobs")):
            new_p = {name: n_jobs}
            estimator.set_params(**new_p)
        elif name.endswith("callbacks"):
            for cb in p:
                cb_type = cb["callback_selection"]["callback_type"]
                if cb_type not in ALLOWED_CALLBACKS:
                    raise ValueError("Prohibited callback type: %s!" % cb_type)

    return estimator

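# Illustration (not part of the original script): for a Pipeline that caches
# transformers to disk and wraps an estimator exposing n_jobs, clean_params()
# blanks the cache path and propagates the Galaxy slot count:
#
#     pipe = Pipeline([("clf", RandomForestClassifier())], memory="/tmp/cache")
#     pipe = clean_params(pipe, n_jobs=4)
#     # pipe.get_params()["memory"] -> None; pipe.get_params()["clf__n_jobs"] -> 4
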
def _get_X_y(params, infile1, infile2):
    """Read the input datasets and return X and y.

    Parameters
    ----------
    params : dict
        Tool input parameters
    infile1 : str
        File path to dataset containing features
    infile2 : str
        File path to dataset containing target values

    """
    # cache of DataFrames already read, keyed by file path and header setting
    loaded_df = {}

    input_type = params["input_options"]["selected_input"]
    # tabular input
    if input_type == "tabular":
        header = "infer" if params["input_options"]["header1"] else None
        column_option = params["input_options"]["column_selector_options_1"]["selected_column_selector_option"]
        if column_option in [
            "by_index_number",
            "all_but_by_index_number",
            "by_header_name",
            "all_but_by_header_name",
        ]:
            c = params["input_options"]["column_selector_options_1"]["col1"]
        else:
            c = None

        df_key = infile1 + repr(header)
        df = pd.read_csv(infile1, sep="\t", header=header, parse_dates=True)
        loaded_df[df_key] = df

        X = read_columns(df, c=c, c_option=column_option).astype(float)
    # sparse input (Matrix Market format)
    elif input_type == "sparse":
        X = mmread(infile1)

    # Get target y
    header = "infer" if params["input_options"]["header2"] else None
    column_option = params["input_options"]["column_selector_options_2"]["selected_column_selector_option2"]
    if column_option in [
        "by_index_number",
        "all_but_by_index_number",
        "by_header_name",
        "all_but_by_header_name",
    ]:
        c = params["input_options"]["column_selector_options_2"]["col2"]
    else:
        c = None

    df_key = infile2 + repr(header)
    if df_key in loaded_df:
        # features and targets come from the same file; reuse the DataFrame
        infile2 = loaded_df[df_key]
    else:
        infile2 = pd.read_csv(infile2, sep="\t", header=header, parse_dates=True)
        loaded_df[df_key] = infile2

    y = read_columns(
        infile2,
        c=c,
        c_option=column_option,
        sep="\t",
        header=header,
        parse_dates=True,
    )
    # flatten a single-column 2-D array to 1-D, as most estimators expect
    if len(y.shape) == 2 and y.shape[1] == 1:
        y = y.ravel()

    return X, y

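# Note (added for clarity): in the sparse branch above, mmread() returns a
# SciPy sparse matrix, so the downstream estimator must accept sparse input.
# The loaded_df cache only pays off when X and y are read from the same
# tabular file with the same header setting.
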
def main(inputs, infile_estimator, infile1, infile2, out_object, out_weights=None):
    """Fit the estimator on the input datasets and persist the result.

    Parameters
    ----------
    inputs : str
        File path to Galaxy tool parameters

    infile_estimator : str
        File path of the input estimator

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target labels

    out_object : str
        File path for output of fitted model or skeleton

    out_weights : str
        File path for output of weights

    """
    with open(inputs, "r") as param_handler:
        params = json.load(param_handler)

    # load model
    with open(infile_estimator, "rb") as est_handler:
        estimator = load_model(est_handler)
    estimator = clean_params(estimator, n_jobs=N_JOBS)

    X_train, y_train = _get_X_y(params, infile1, infile2)

    estimator.fit(X_train, y_train)

    main_est = estimator
    if isinstance(main_est, Pipeline):
        main_est = main_est.steps[-1][-1]
    if hasattr(main_est, "model_") and hasattr(main_est, "save_weights"):
        if out_weights:
            main_est.save_weights(out_weights)
        del main_est.model_
        del main_est.fit_params
        del main_est.model_class_
        if getattr(main_est, "validation_data", None):
            del main_est.validation_data
        if getattr(main_est, "data_generator_", None):
            del main_est.data_generator_

    with open(out_object, "wb") as output_handler:
        pickle.dump(estimator, output_handler, pickle.HIGHEST_PROTOCOL)

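# Note (added for clarity): the attribute stripping in main() targets
# galaxy_ml estimators that wrap a Keras model. The fitted backend model
# (`model_`) is not directly picklable, so its weights go to `out_weights`
# while `out_object` keeps only a re-buildable skeleton of the estimator.
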
if __name__ == "__main__":
    aparser = argparse.ArgumentParser()
    aparser.add_argument("-i", "--inputs", dest="inputs", required=True)
    aparser.add_argument("-X", "--infile_estimator", dest="infile_estimator")
    aparser.add_argument("-y", "--infile1", dest="infile1")
    aparser.add_argument("-g", "--infile2", dest="infile2")
    aparser.add_argument("-o", "--out_object", dest="out_object")
    aparser.add_argument("-t", "--out_weights", dest="out_weights")
    args = aparser.parse_args()

    main(
        args.inputs,
        args.infile_estimator,
        args.infile1,
        args.infile2,
        args.out_object,
        args.out_weights,
    )
```
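
For orientation, here is a minimal sketch of how `main()` might be driven end to end. The file names and the `params` structure below are hypothetical reconstructions from the key lookups in `_get_X_y()`, not taken from this changeset or the Galaxy tool XML:

```python
import json

# Hypothetical tool parameters mirroring the keys _get_X_y() reads;
# the real structure is defined by the Galaxy tool wrapper.
params = {
    "input_options": {
        "selected_input": "tabular",
        "header1": True,
        "header2": True,
        "column_selector_options_1": {
            "selected_column_selector_option": "all_but_by_header_name",
            "col1": ["target"],
        },
        "column_selector_options_2": {
            "selected_column_selector_option2": "by_header_name",
            "col2": ["target"],
        },
    }
}
with open("inputs.json", "w") as handler:
    json.dump(params, handler)

# Equivalent to the command line:
#   python simple_model_fit.py -i inputs.json -X est.pickle \
#       -y train.tsv -g train.tsv -o fitted.pickle -t weights.h5
main(
    "inputs.json",
    "est.pickle",      # pickled estimator built by another Galaxy tool
    "train.tsv",       # features (tab-separated)
    "train.tsv",       # targets read from the same file
    "fitted.pickle",
    "weights.h5",      # only written for Keras-backed estimators
)
```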
