#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys

from pyspark.rdd import RDD
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Matrix, _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat.test import ChiSqTestResult, KolmogorovSmirnovTestResult


__all__ = ['MultivariateStatisticalSummary', 'Statistics']


class MultivariateStatisticalSummary(JavaModelWrapper):

    """
    Trait for multivariate statistical summary of a data matrix.

    Instances are returned by :py:func:`Statistics.colStats`; each accessor
    below reports a column-wise statistic of the underlying data matrix.
    """

    def mean(self):
        """Column-wise mean."""
        return self.call("mean").toArray()

    def variance(self):
        """Column-wise variance."""
        return self.call("variance").toArray()

    def count(self):
        """Number of rows (vectors) in the data."""
        return int(self.call("count"))

    def numNonzeros(self):
        """Number of nonzero entries in each column."""
        return self.call("numNonzeros").toArray()

    def max(self):
        """Column-wise maximum."""
        return self.call("max").toArray()

    def min(self):
        """Column-wise minimum."""
        return self.call("min").toArray()

    def normL1(self):
        """L1 norm of each column."""
        return self.call("normL1").toArray()

    def normL2(self):
        """Euclidean (L2) norm of each column."""
        return self.call("normL2").toArray()

 

 

class Statistics(object):

    @staticmethod
    def colStats(rdd):
        """
        Computes column-wise summary statistics for the input RDD[Vector].

        Parameters
        ----------
        rdd : :py:class:`pyspark.RDD`
            an RDD[Vector] for which column-wise summary statistics
            are to be computed.

        Returns
        -------
        :class:`MultivariateStatisticalSummary`
            object containing column-wise summary statistics.

        Examples
        --------
        >>> from pyspark.mllib.linalg import Vectors
        >>> rdd = sc.parallelize([Vectors.dense([2, 0, 0, -2]),
        ...                       Vectors.dense([4, 5, 0, 3]),
        ...                       Vectors.dense([6, 7, 0, 8])])
        >>> cStats = Statistics.colStats(rdd)
        >>> cStats.mean()
        array([ 4.,  4.,  0.,  3.])
        >>> cStats.variance()
        array([  4.,  13.,   0.,  25.])
        >>> cStats.count()
        3
        >>> cStats.numNonzeros()
        array([ 3.,  2.,  0.,  3.])
        >>> cStats.max()
        array([ 6.,  7.,  0.,  8.])
        >>> cStats.min()
        array([ 2.,  0.,  0., -2.])
        """
        cStats = callMLlibFunc("colStats", rdd.map(_convert_to_vector))
        return MultivariateStatisticalSummary(cStats)

    @staticmethod
    def corr(x, y=None, method=None):
        """
        Compute the correlation (matrix) for the input RDD(s) using the
        specified method.
        Methods currently supported: `pearson` (default), `spearman`.

        If a single RDD of Vectors is passed in, a correlation matrix
        comparing the columns in the input RDD is returned. Use `method`
        to specify the method to be used for single RDD input.
        If two RDDs of floats are passed in, a single float is returned.

        Parameters
        ----------
        x : :py:class:`pyspark.RDD`
            an RDD of vector for which the correlation matrix is to be computed,
            or an RDD of float of the same cardinality as y when y is specified.
        y : :py:class:`pyspark.RDD`, optional
            an RDD of float of the same cardinality as x.
        method : str, optional
            String specifying the method to use for computing correlation.
            Supported: `pearson` (default), `spearman`

        Returns
        -------
        :py:class:`pyspark.mllib.linalg.Matrix`
            Correlation matrix comparing columns in x (a single float is
            returned instead when `y` is specified).

        Examples
        --------
        >>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
        >>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
        >>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
        >>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
        True
        >>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
        True
        >>> Statistics.corr(x, y, "spearman")
        0.5
        >>> from math import isnan
        >>> isnan(Statistics.corr(x, zeros))
        True
        >>> from pyspark.mllib.linalg import Vectors
        >>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
        ...                       Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
        >>> pearsonCorr = Statistics.corr(rdd)
        >>> print(str(pearsonCorr).replace('nan', 'NaN'))
        [[ 1.          0.05564149         NaN  0.40047142]
         [ 0.05564149  1.                 NaN  0.91359586]
         [        NaN         NaN  1.                 NaN]
         [ 0.40047142  0.91359586         NaN  1.        ]]
        >>> spearmanCorr = Statistics.corr(rdd, method="spearman")
        >>> print(str(spearmanCorr).replace('nan', 'NaN'))
        [[ 1.          0.10540926         NaN  0.4       ]
         [ 0.10540926  1.                 NaN  0.9486833 ]
         [        NaN         NaN  1.                 NaN]
         [ 0.4         0.9486833          NaN  1.        ]]
        >>> try:
        ...     Statistics.corr(rdd, "spearman")
        ...     print("Method name as second argument without 'method=' shouldn't be allowed.")
        ... except TypeError:
        ...     pass
        """
        # Check inputs to determine whether a single value or a matrix is needed for output.
        # Since it's legal for users to use the method name as the second argument, we need to
        # check if y is used to specify the method name instead.
        if type(y) == str:
            raise TypeError("Use 'method=' to specify method name.")

        if not y:
            return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
        else:
            return callMLlibFunc("corr", x.map(float), y.map(float), method)

 

    @staticmethod
    def chiSqTest(observed, expected=None):
        """
        If `observed` is a Vector, conduct Pearson's chi-squared goodness
        of fit test of the observed data against the expected distribution,
        or against the uniform distribution (by default), with each category
        having an expected frequency of `1 / len(observed)`.

        If `observed` is a matrix, conduct Pearson's independence test on the
        input contingency matrix, which cannot contain negative entries or
        columns or rows that sum up to 0.

        If `observed` is an RDD of LabeledPoint, conduct Pearson's independence
        test for every feature against the label across the input RDD.
        For each feature, the (feature, label) pairs are converted into a
        contingency matrix for which the chi-squared statistic is computed.
        All label and feature values must be categorical.

        Parameters
        ----------
        observed : :py:class:`pyspark.mllib.linalg.Vector` or \
            :py:class:`pyspark.mllib.linalg.Matrix`
            it could be a vector containing the observed categorical
            counts/relative frequencies, or the contingency matrix
            (containing either counts or relative frequencies),
            or an RDD of LabeledPoint containing the labeled dataset
            with categorical features. Real-valued features will be
            treated as categorical for each distinct value.
        expected : :py:class:`pyspark.mllib.linalg.Vector`, optional
            Vector containing the expected categorical counts/relative
            frequencies. `expected` is rescaled if the `expected` sum
            differs from the `observed` sum.

        Returns
        -------
        :py:class:`pyspark.mllib.stat.ChiSqTestResult`
            object containing the test statistic, degrees
            of freedom, p-value, the method used, and the null hypothesis.

        Notes
        -----
        `observed` cannot contain negative values

        Examples
        --------
        >>> from pyspark.mllib.linalg import Vectors, Matrices
        >>> observed = Vectors.dense([4, 6, 5])
        >>> pearson = Statistics.chiSqTest(observed)
        >>> print(pearson.statistic)
        0.4
        >>> pearson.degreesOfFreedom
        2
        >>> print(round(pearson.pValue, 4))
        0.8187
        >>> pearson.method
        'pearson'
        >>> pearson.nullHypothesis
        'observed follows the same distribution as expected.'

        >>> observed = Vectors.dense([21, 38, 43, 80])
        >>> expected = Vectors.dense([3, 5, 7, 20])
        >>> pearson = Statistics.chiSqTest(observed, expected)
        >>> print(round(pearson.pValue, 4))
        0.0027

        >>> data = [40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0]
        >>> chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
        >>> print(round(chi.statistic, 4))
        21.9958

        >>> data = [LabeledPoint(0.0, Vectors.dense([0.5, 10.0])),
        ...         LabeledPoint(0.0, Vectors.dense([1.5, 20.0])),
        ...         LabeledPoint(1.0, Vectors.dense([1.5, 30.0])),
        ...         LabeledPoint(0.0, Vectors.dense([3.5, 30.0])),
        ...         LabeledPoint(0.0, Vectors.dense([3.5, 40.0])),
        ...         LabeledPoint(1.0, Vectors.dense([3.5, 40.0])),]
        >>> rdd = sc.parallelize(data, 4)
        >>> chi = Statistics.chiSqTest(rdd)
        >>> print(chi[0].statistic)
        0.75
        >>> print(chi[1].statistic)
        1.5
        """
        if isinstance(observed, RDD):
            if not isinstance(observed.first(), LabeledPoint):
                raise ValueError("observed should be an RDD of LabeledPoint")
            jmodels = callMLlibFunc("chiSqTest", observed)
            return [ChiSqTestResult(m) for m in jmodels]

        if isinstance(observed, Matrix):
            jmodel = callMLlibFunc("chiSqTest", observed)
        else:
            if expected and len(expected) != len(observed):
                raise ValueError("`expected` should have the same length as `observed`")
            jmodel = callMLlibFunc("chiSqTest", _convert_to_vector(observed), expected)
        return ChiSqTestResult(jmodel)
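
    # When ``observed`` is an RDD of LabeledPoint, one ChiSqTestResult is
    # returned per feature. A minimal sketch of inspecting them (illustrative
    # only, assuming ``rdd`` as in the doctest above):
    #
    #     results = Statistics.chiSqTest(rdd)
    #     for i, result in enumerate(results):
    #         print("feature %d: p-value %g" % (i, result.pValue))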

 

    @staticmethod
    def kolmogorovSmirnovTest(data, distName="norm", *params):
        """
        Performs the Kolmogorov-Smirnov (KS) test for data sampled from
        a continuous distribution. It tests the null hypothesis that
        the data is generated from a particular distribution.

        The given data is sorted and the Empirical Cumulative
        Distribution Function (ECDF) is calculated, which for a given
        point is the fraction of sample points with a value less than
        or equal to that point.

        Since the data is sorted, this is a step function
        that rises by (1 / length of data) for every ordered point.

        The KS statistic gives us the maximum distance between the
        ECDF and the CDF. Intuitively, if this statistic is large, the
        probability that the null hypothesis is true becomes small.
        For specific details of the implementation, please have a look
        at the Scala documentation.
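
        In symbols, with ``F_n`` the empirical CDF of the sample and ``F``
        the theoretical CDF, the statistic is::

            D_n = sup_x |F_n(x) - F(x)|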

 

 

        Parameters
        ----------
        data : :py:class:`pyspark.RDD`
            RDD, samples from the data
        distName : str, optional
            name of the theoretical distribution used to calculate the
            expected distribution of the data; currently only "norm"
            (the normal distribution) is supported.
        params
            additional values which need to be provided for
            a certain distribution (for "norm": the mean and standard
            deviation). If not provided, the default values are used.

        Returns
        -------
        :py:class:`pyspark.mllib.stat.KolmogorovSmirnovTestResult`
            object containing the test statistic, degrees of freedom, p-value,
            the method used, and the null hypothesis.

        Examples
        --------
        >>> kstest = Statistics.kolmogorovSmirnovTest
        >>> data = sc.parallelize([-1.0, 0.0, 1.0])
        >>> ksmodel = kstest(data, "norm")
        >>> print(round(ksmodel.pValue, 3))
        1.0
        >>> print(round(ksmodel.statistic, 3))
        0.175
        >>> ksmodel.nullHypothesis
        'Sample follows theoretical distribution'

        >>> data = sc.parallelize([2.0, 3.0, 4.0])
        >>> ksmodel = kstest(data, "norm", 3.0, 1.0)
        >>> print(round(ksmodel.pValue, 3))
        1.0
        >>> print(round(ksmodel.statistic, 3))
        0.175
        """
        if not isinstance(data, RDD):
            raise TypeError("data should be an RDD, got %s." % type(data))
        if not isinstance(distName, str):
            raise TypeError("distName should be a string, got %s." % type(distName))

        params = [float(param) for param in params]
        return KolmogorovSmirnovTestResult(
            callMLlibFunc("kolmogorovSmirnovTest", data, distName, params))

 

 

def _test():
    import doctest
    import numpy
    from pyspark.sql import SparkSession
    try:
        # NumPy 1.14+ changed its string format.
        numpy.set_printoptions(legacy='1.13')
    except TypeError:
        pass
    globs = globals().copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.stat.statistics tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        sys.exit(-1)


if __name__ == "__main__":
    _test()