Python* API Reference for Intel® Data Analytics Acceleration Library 2020 Update 1

pca_metrics_dense_batch.py

# file: pca_metrics_dense_batch.py
#===============================================================================
# Copyright 2014-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================

import os
import sys

import daal.algorithms.pca as pca
import daal.algorithms.pca.quality_metric_set as quality_metric_set
from daal.algorithms.pca.quality_metric import explained_variance
from daal.data_management import (
    DataSourceIface, FileDataSource, HomogenNumericTable, MergedNumericTable,
    NumericTableIface, BlockDescriptor, readWrite
)

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printNumericTable

datasetFileName = os.path.join('..', 'data', 'batch', 'pca_normalized.csv')
nVectors = 1000
nComponents = 5

qmsResult = None
eigenData = None


def trainModel():
    global eigenData

    # Initialize FileDataSource to retrieve the input data from a .csv file
    dataSource = FileDataSource(
        datasetFileName,
        DataSourceIface.doAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Retrieve the data from the input file
    dataSource.loadDataBlock(nVectors)

    # Create an algorithm for principal component analysis using the SVD method
    algorithm = pca.Batch(method=pca.svdDense)

    # Set the algorithm input data
    algorithm.input.setDataset(pca.data, dataSource.getNumericTable())

    # Compute results of the PCA algorithm
    result = algorithm.compute()
    eigenData = result.get(pca.eigenvalues)


def testPcaQuality():
    global qmsResult

    # Create a quality metric set object to compute quality metrics of the PCA algorithm
    qualityMetricSet = quality_metric_set.Batch(nComponents)
    explainedVariances = explained_variance.Input.downCast(
        qualityMetricSet.getInputDataCollection().getInput(quality_metric_set.explainedVariancesMetrics)
    )
    explainedVariances.setInput(explained_variance.eigenvalues, eigenData)

    # Compute quality metrics
    qualityMetricSet.compute()

    # Retrieve the quality metrics
    qmsResult = qualityMetricSet.getResultCollection()


def printResults():
    print("Quality metrics for PCA")
    result = explained_variance.Result.downCast(
        qmsResult.getResult(quality_metric_set.explainedVariancesMetrics)
    )
    printNumericTable(result.getResult(explained_variance.explainedVariances), "Explained variances:")
    printNumericTable(result.getResult(explained_variance.explainedVariancesRatios), "Explained variances ratios:")
    printNumericTable(result.getResult(explained_variance.noiseVariance), "Noise variance:")


if __name__ == "__main__":
    trainModel()
    testPcaQuality()
    printResults()
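
For orientation, the metrics printed above can be understood through the usual explained-variance definitions: each explained variance is an eigenvalue of the data's covariance matrix, each ratio is that eigenvalue divided by the sum of all eigenvalues, and the noise variance averages the eigenvalues beyond the first nComponents. The NumPy sketch below illustrates these assumed definitions on a hypothetical eigenvalue spectrum; it is not part of the library API, and the exact formulas used by the quality metric set should be checked against the library documentation.

# Minimal NumPy sketch of the assumed definitions behind the PCA quality metrics.
# The eigenvalue array is hypothetical and not derived from pca_normalized.csv.
import numpy as np

eigenvalues = np.array([4.0, 2.0, 1.0, 0.5, 0.3, 0.1, 0.1])  # hypothetical spectrum
nComponents = 5

explained_variances = eigenvalues[:nComponents]                       # retained eigenvalues
explained_variance_ratios = eigenvalues[:nComponents] / eigenvalues.sum()
noise_variance = eigenvalues[nComponents:].mean()                     # average of discarded eigenvalues

print(explained_variances)
print(explained_variance_ratios)
print(noise_variance)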
