Python* API Reference for Intel® Data Analytics Acceleration Library 2020 Update 1

adagrad_opt_res_dense_batch.py

# file: adagrad_opt_res_dense_batch.py
#===============================================================================
# Copyright 2014-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================

#
# !  Content:
# !    Python example of the Adagrad algorithm
# !*****************************************************************************

#

#
import os
import sys

import numpy as np

import daal.algorithms.optimization_solver as optimization_solver
import daal.algorithms.optimization_solver.mse
import daal.algorithms.optimization_solver.adagrad
import daal.algorithms.optimization_solver.iterative_solver
from daal.data_management import (
    DataSourceIface, FileDataSource, HomogenNumericTable, MergedNumericTable, NumericTableIface
)

# Make the shared example helpers (utils.py, one directory above this
# script) importable regardless of the current working directory.
utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printNumericTable
45 
# Input data set: nFeatures feature columns plus one dependent-variable column.
datasetFileName = os.path.join('..', 'data', 'batch', 'mse.csv')

nFeatures = 3
accuracyThreshold = 0.0000001
# The total iteration budget is split across two compute() calls to
# demonstrate resuming the solver from its optional (state) result.
halfNIterations = 500
nIterations = halfNIterations * 2
batchSize = 1
learningRate = 1.0

# Initial argument of the MSE objective: nFeatures + 1 coefficients,
# stored as a column vector.
startPoint = np.array([[8], [2], [1], [4]], dtype=np.float64)
56 
if __name__ == "__main__":

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the input data
    # from the .csv file; numeric tables are created manually below, so the
    # data source must not allocate its own.
    dataSource = FileDataSource(
        datasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create numeric tables for the features and the dependent variable and
    # merge them so a single load fills both.
    data = HomogenNumericTable(nFeatures, 0, NumericTableIface.doNotAllocate)
    dependentVariables = HomogenNumericTable(1, 0, NumericTableIface.doNotAllocate)
    mergedData = MergedNumericTable(data, dependentVariables)

    # Retrieve the data from the input file.
    dataSource.loadDataBlock(mergedData)

    nVectors = data.getNumberOfRows()

    # Mean-squared-error objective function over the loaded observations.
    mseObjectiveFunction = optimization_solver.mse.Batch(nVectors)
    mseObjectiveFunction.input.set(optimization_solver.mse.data, data)
    mseObjectiveFunction.input.set(optimization_solver.mse.dependentVariables, dependentVariables)

    # Configure the Adagrad solver (default method) to run only half of the
    # iterations and to keep the optional result (internal solver state) so
    # the computation can be resumed afterwards.
    adagradAlgorithm = optimization_solver.adagrad.Batch(mseObjectiveFunction)
    adagradAlgorithm.input.setInput(optimization_solver.iterative_solver.inputArgument,
                                    HomogenNumericTable(startPoint))
    adagradAlgorithm.parameter.learningRate = HomogenNumericTable(1, 1, NumericTableIface.doAllocate, learningRate)
    adagradAlgorithm.parameter.nIterations = halfNIterations
    adagradAlgorithm.parameter.accuracyThreshold = accuracyThreshold
    adagradAlgorithm.parameter.batchSize = batchSize
    adagradAlgorithm.parameter.optionalResultRequired = True

    # First half of the optimization; res is a Result from
    # daal.algorithms.optimization_solver.iterative_solver.
    res = adagradAlgorithm.compute()

    printNumericTable(res.getResult(optimization_solver.iterative_solver.minimum), "Minimum after first compute():")
    printNumericTable(res.getResult(optimization_solver.iterative_solver.nIterations), "Number of iterations performed:")

    # Resume from the minimum and the optional (internal state) result
    # reached so far, then run the remaining iterations.
    adagradAlgorithm.input.setInput(optimization_solver.iterative_solver.inputArgument,
                                    res.getResult(optimization_solver.iterative_solver.minimum))
    adagradAlgorithm.input.setInput(optimization_solver.iterative_solver.optionalArgument,
                                    res.getResult(optimization_solver.iterative_solver.optionalResult))

    res = adagradAlgorithm.compute()

    printNumericTable(res.getResult(optimization_solver.iterative_solver.minimum), "Minimum after second compute():")
    printNumericTable(res.getResult(optimization_solver.iterative_solver.nIterations), "Number of iterations performed:")

For more complete information about compiler optimizations, see our Optimization Notice.