DayF core 1.2.1.2
DayF (Decision at your Fingertips) is an AutoML freeware development framework that lets developers work with Machine Learning models without any AI expertise, simply by providing a CSV dataset and the objective column.
sparkmodelmetadata.py
5 '''
6 Copyright (C) e2its - All Rights Reserved
7  * Unauthorized copying of this file, via any medium is strictly prohibited
8  * Proprietary and confidential
9  *
10  * This file is part of gDayF project.
11  *
12  * Written by Jose L. Sanchez <e2its.es@gmail.com>, 2016-2019
13 '''
14 
15 from gdayf.models.modelmetadata import ModelMetadata
16 from gdayf.common.constants import *
17 from collections import OrderedDict
18 from time import time
19 
20 
21 
class sparkModelMetadata(ModelMetadata):
    """Generate spark Model metadata Class and initialize base members.

    Loads the Spark-specific sections of the framework configuration on top
    of the common :class:`ModelMetadata` base members, and builds per-model
    parameter templates adapted to a concrete analysis.
    """

    def __init__(self, e_c):
        """Initialize Spark model metadata from the framework configuration.

        :param e_c: execution context, forwarded to ModelMetadata
            (assumed to provide ``self._config`` — confirm against base class).
        """
        ModelMetadata.__init__(self, e_c)
        # @var _config
        # Initialized _config to spark all models default values
        spark_config = self._config['spark']
        # Parameters whose numeric values may be scaled by the `increment`
        # factor in generate_models().
        self._optimizable_scale_params = spark_config['conf']['optimizable_scale_params']
        # Template definitions for every supported spark estimator.
        self._models = spark_config['models']

    def generate_models(self, model_type, atype, amode=POC, increment=1):
        """Generate a spark model template adapted to a specific analysis.

        Loads the config parameters of the requested estimator and copies
        every ``seleccionable`` parameter into ``self.model``, adapting:

        * ``stopping`` parameters — only included for fast modes
          (POC, FAST, FAST_PARANOIAC);
        * ``effort`` parameters — scaled by ``increment`` when listed in
          ``_optimizable_scale_params`` (int types are truncated to int);
        * ``distribution`` parameters — ``modelType`` is forced to
          'bernoulli'/'multinomial' for classification analyses.

        :param model_type: estimator identifier matched against the
            ``model`` key of each config template.
        :param atype: analysis type descriptor list; ``atype[0]['type']``
            selects the distribution ('binomial' / 'multinomial' / other).
        :param amode: analysis mode constant (default POC).
        :param increment: scale factor applied to optimizable effort
            parameters (default 1, i.e. no scaling).
        :return: the populated ``self.model`` OrderedDict template.
        """
        if atype[0]['type'] == 'binomial':
            distribution = 'binomial'
        elif atype[0]['type'] == 'multinomial':
            distribution = 'multinomial'
        else:
            distribution = 'default'
        # Timestamp used as a reproducible seed for this generation run.
        ts = round(time(), 0)
        for each_model in self._models:
            if each_model['model'] == model_type:
                for key, value in each_model.items():
                    if key == 'parameters':
                        self.model[key] = OrderedDict()
                        for subkey, subvalue in value.items():
                            if subkey not in ['stopping', 'distribution', 'effort']:
                                # Plain parameter group: copy selectable entries as-is.
                                for parm, parm_value in subvalue.items():
                                    if parm_value['seleccionable']:
                                        self.model[key][parm] = parm_value
                            elif subkey == 'stopping':
                                # Early-stopping controls only apply to fast modes.
                                if amode in [POC, FAST, FAST_PARANOIAC]:
                                    for parm, parm_value in subvalue.items():
                                        if parm_value['seleccionable']:
                                            self.model[key][parm] = parm_value
                            elif subkey == 'effort':
                                for parm, parm_value in subvalue.items():
                                    if parm_value['seleccionable']:
                                        self.model[key][parm] = parm_value
                                        if isinstance(self.model[key][parm]['value'], list) \
                                                and parm in self._optimizable_scale_params:
                                            # Scale every element of a list-valued
                                            # parameter, truncating to int.
                                            for counter in range(len(self.model[key][parm]['value'])):
                                                self.model[key][parm]['value'][counter] = \
                                                    int(self.model[key][parm]['value'][counter] * increment)
                                        elif self.model[key][parm]['type'] in DTYPES \
                                                and parm in self._optimizable_scale_params:
                                            if self.model[key][parm]['type'] in ITYPES:
                                                # Integer-typed: keep the scaled value integral.
                                                self.model[key][parm]['value'] = \
                                                    int(self.model[key][parm]['value'] * increment)
                                            else:
                                                self.model[key][parm]['value'] *= increment
                            elif subkey == 'distribution':
                                for parm, parm_value in subvalue.items():
                                    if parm_value['seleccionable']:
                                        self.model[key][parm] = parm_value
                                        if parm == 'modelType':
                                            # Spark naming: 'binomial' analyses map to the
                                            # 'bernoulli' model type; 'multinomial' is kept.
                                            if distribution == 'binomial':
                                                distribution = 'bernoulli'
                                            if distribution in ['bernoulli', 'multinomial']:
                                                self.model[key][parm]['value'] = distribution
                                                # Clear the type list: the value is now fixed.
                                                self.model[key][parm]['type'] = list()
                    elif key == 'types':
                        self.model[key] = atype
                    else:
                        self.model[key] = value
        try:
            # Fix the seed to the generation timestamp for reproducibility;
            # silently skip models that expose no 'seed' parameter.
            self.model['parameters']['seed']['value'] = int(ts)
        except KeyError:
            pass
        return self.model
98 
99 
if __name__ == "__main__":
    # Smoke test: generate every supported spark estimator template for each
    # analysis type / mode combination and dump it as JSON.
    from json import dumps
    models = ['sparkDeepLearningEstimator', 'sparkGradientBoostingEstimator',
              'sparkGeneralizedLinearEstimator', 'sparkRandomForestEstimator']
    amodes = [POC, NORMAL]
    atypes = [
        [
            {
                "type": "binomial",
                "active": True,
                "valued": "enum"
            }
        ],
        [
            {
                "type": "multinomial",
                "active": True,
                "valued": "enum"
            }
        ],
        [
            {
                "type": "regression",
                "active": True,
                "valued": "float64"
            }
        ]
    ]
    for each_model in models:
        for atype in atypes:
            for amode in amodes:
                # Fresh metadata object per combination so templates do not
                # leak state between runs.
                # NOTE(review): sparkModelMetadata() is called without the
                # e_c argument that __init__ requires — confirm the intended
                # constructor signature for this demo.
                modelbase = sparkModelMetadata()
                print(amode)
                print(dumps(modelbase.generate_models(each_model, atype, amode), indent=4))
API summary:
- sparkModelMetadata: generates the spark Model base class and initializes its base members, defining base model methods and members in a unified way (base class defined in modelmetadata.py).
- generate_models(model_type, atype, amode=POC, increment=1): generates spark model templates by loading config parameters and adapting them to the specific analysis.