#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys

from pyspark import since, SparkContext
from pyspark.sql.column import _to_seq, _to_java_column

__all__ = ["Window", "WindowSpec"]

 

 

def _to_java_cols(cols):
    sc = SparkContext._active_spark_context
    if len(cols) == 1 and isinstance(cols[0], list):
        cols = cols[0]
    return _to_seq(sc, cols, _to_java_column)
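
# Note on the helper above: callers may pass columns either as varargs or as a
# single list, and both spellings normalize to the same Java Seq. A minimal
# sketch of the two call shapes (hypothetical values, comments only):
#
#     _to_java_cols(("a", "b"))       # from e.g. Window.partitionBy("a", "b")
#     _to_java_cols((["a", "b"],))    # from e.g. Window.partitionBy(["a", "b"])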

 

 

class Window(object):
    """
    Utility functions for defining window in DataFrames.

    .. versionadded:: 1.4

    Notes
    -----
    When ordering is not defined, an unbounded window frame (rowFrame,
    unboundedPreceding, unboundedFollowing) is used by default. When ordering is defined,
    a growing window frame (rangeFrame, unboundedPreceding, currentRow) is used by default.

    Examples
    --------
    >>> # ORDER BY date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
    >>> window = Window.orderBy("date").rowsBetween(Window.unboundedPreceding, Window.currentRow)

    >>> # PARTITION BY country ORDER BY date RANGE BETWEEN 3 PRECEDING AND 3 FOLLOWING
    >>> window = Window.orderBy("date").partitionBy("country").rangeBetween(-3, 3)
    """

    _JAVA_MIN_LONG = -(1 << 63)  # -9223372036854775808
    _JAVA_MAX_LONG = (1 << 63) - 1  # 9223372036854775807
    _PRECEDING_THRESHOLD = max(-sys.maxsize, _JAVA_MIN_LONG)
    _FOLLOWING_THRESHOLD = min(sys.maxsize, _JAVA_MAX_LONG)

    unboundedPreceding = _JAVA_MIN_LONG

    unboundedFollowing = _JAVA_MAX_LONG

    currentRow = 0
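
    # Orientation sketch (comments only): the class attributes above are the
    # sentinel frame boundaries, and they map onto the SQL frame clause. For
    # example,
    #
    #     Window.rowsBetween(Window.unboundedPreceding, Window.currentRow)
    #
    # corresponds to "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW", as
    # the doctest comments in the class docstring illustrate.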

 

    @staticmethod
    @since(1.4)
    def partitionBy(*cols):
        """
        Creates a :class:`WindowSpec` with the partitioning defined.
        """
        sc = SparkContext._active_spark_context
        jspec = sc._jvm.org.apache.spark.sql.expressions.Window.partitionBy(_to_java_cols(cols))
        return WindowSpec(jspec)

    @staticmethod
    @since(1.4)
    def orderBy(*cols):
        """
        Creates a :class:`WindowSpec` with the ordering defined.
        """
        sc = SparkContext._active_spark_context
        jspec = sc._jvm.org.apache.spark.sql.expressions.Window.orderBy(_to_java_cols(cols))
        return WindowSpec(jspec)

 

    @staticmethod
    def rowsBetween(start, end):
        """
        Creates a :class:`WindowSpec` with the frame boundaries defined,
        from `start` (inclusive) to `end` (inclusive).

        Both `start` and `end` are relative positions from the current row.
        For example, "0" means "current row", while "-1" means the row before
        the current row, and "5" means the fifth row after the current row.

        We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
        and ``Window.currentRow`` to specify special boundary values, rather than using integral
        values directly.

        A row-based boundary is based on the position of the row within the partition.
        An offset indicates the number of rows above or below the current row at which
        the frame for the current row starts or ends. For instance, given a row-based
        sliding frame with a lower bound offset of -1 and an upper bound offset of +2,
        the frame for the row with index 5 would range from index 4 to index 7.

        .. versionadded:: 2.1.0

        Parameters
        ----------
        start : int
            boundary start, inclusive.
            The frame is unbounded if this is ``Window.unboundedPreceding``, or
            any value less than or equal to -9223372036854775808.
        end : int
            boundary end, inclusive.
            The frame is unbounded if this is ``Window.unboundedFollowing``, or
            any value greater than or equal to 9223372036854775807.

        Examples
        --------
        >>> from pyspark.sql import Window
        >>> from pyspark.sql import functions as func
        >>> from pyspark.sql import SQLContext
        >>> sc = SparkContext.getOrCreate()
        >>> sqlContext = SQLContext(sc)
        >>> tup = [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")]
        >>> df = sqlContext.createDataFrame(tup, ["id", "category"])
        >>> window = Window.partitionBy("category").orderBy("id").rowsBetween(Window.currentRow, 1)
        >>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category", "sum").show()
        +---+--------+---+
        | id|category|sum|
        +---+--------+---+
        |  1|       a|  2|
        |  1|       a|  3|
        |  1|       b|  3|
        |  2|       a|  2|
        |  2|       b|  5|
        |  3|       b|  3|
        +---+--------+---+

        """
        if start <= Window._PRECEDING_THRESHOLD:
            start = Window.unboundedPreceding
        if end >= Window._FOLLOWING_THRESHOLD:
            end = Window.unboundedFollowing
        sc = SparkContext._active_spark_context
        jspec = sc._jvm.org.apache.spark.sql.expressions.Window.rowsBetween(start, end)
        return WindowSpec(jspec)

 

    @staticmethod
    def rangeBetween(start, end):
        """
        Creates a :class:`WindowSpec` with the frame boundaries defined,
        from `start` (inclusive) to `end` (inclusive).

        Both `start` and `end` are relative to the current row. For example,
        "0" means "current row", while "-1" means one unit before the current
        row, and "5" means five units after the current row.

        We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
        and ``Window.currentRow`` to specify special boundary values, rather than using integral
        values directly.

        A range-based boundary is based on the actual value of the ORDER BY
        expression(s). An offset is used to alter the value of the ORDER BY expression;
        for instance, if the current ORDER BY expression has a value of 10 and the lower
        bound offset is -3, the resulting lower bound for the current row will be
        10 - 3 = 7. This, however, puts a number of constraints on the ORDER BY
        expressions: there can be only one expression, and it must have a numerical
        data type. An exception can be made when the offset is unbounded, because no
        value modification is needed; in this case multiple and non-numeric ORDER BY
        expressions are allowed.

        .. versionadded:: 2.1.0

        Parameters
        ----------
        start : int
            boundary start, inclusive.
            The frame is unbounded if this is ``Window.unboundedPreceding``, or
            any value less than or equal to max(-sys.maxsize, -9223372036854775808).
        end : int
            boundary end, inclusive.
            The frame is unbounded if this is ``Window.unboundedFollowing``, or
            any value greater than or equal to min(sys.maxsize, 9223372036854775807).

        Examples
        --------
        >>> from pyspark.sql import Window
        >>> from pyspark.sql import functions as func
        >>> from pyspark.sql import SQLContext
        >>> sc = SparkContext.getOrCreate()
        >>> sqlContext = SQLContext(sc)
        >>> tup = [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")]
        >>> df = sqlContext.createDataFrame(tup, ["id", "category"])
        >>> window = Window.partitionBy("category").orderBy("id").rangeBetween(Window.currentRow, 1)
        >>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category").show()
        +---+--------+---+
        | id|category|sum|
        +---+--------+---+
        |  1|       a|  4|
        |  1|       a|  4|
        |  1|       b|  3|
        |  2|       a|  2|
        |  2|       b|  5|
        |  3|       b|  3|
        +---+--------+---+

        """
        if start <= Window._PRECEDING_THRESHOLD:
            start = Window.unboundedPreceding
        if end >= Window._FOLLOWING_THRESHOLD:
            end = Window.unboundedFollowing
        sc = SparkContext._active_spark_context
        jspec = sc._jvm.org.apache.spark.sql.expressions.Window.rangeBetween(start, end)
        return WindowSpec(jspec)
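
    # Worked illustration of the range arithmetic described above (comments
    # only): with a current ORDER BY value of 10, rangeBetween(-3, 2) yields
    # the value interval [7, 12]; every row whose ORDER BY value falls in that
    # interval joins the frame, however many rows that is. rowsBetween, by
    # contrast, counts physical row positions within the partition.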

 

 

class WindowSpec(object):
    """
    A window specification that defines the partitioning, ordering,
    and frame boundaries.

    Use the static methods in :class:`Window` to create a :class:`WindowSpec`.

    .. versionadded:: 1.4.0
    """

    def __init__(self, jspec):
        self._jspec = jspec

 

    def partitionBy(self, *cols):
        """
        Defines the partitioning columns in a :class:`WindowSpec`.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        cols : str, :class:`Column` or list
            names of columns or expressions
        """
        return WindowSpec(self._jspec.partitionBy(_to_java_cols(cols)))

    def orderBy(self, *cols):
        """
        Defines the ordering columns in a :class:`WindowSpec`.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        cols : str, :class:`Column` or list
            names of columns or expressions
        """
        return WindowSpec(self._jspec.orderBy(_to_java_cols(cols)))

 

    def rowsBetween(self, start, end):
        """
        Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive).

        Both `start` and `end` are relative positions from the current row.
        For example, "0" means "current row", while "-1" means the row before
        the current row, and "5" means the fifth row after the current row.

        We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
        and ``Window.currentRow`` to specify special boundary values, rather than using integral
        values directly.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        start : int
            boundary start, inclusive.
            The frame is unbounded if this is ``Window.unboundedPreceding``, or
            any value less than or equal to max(-sys.maxsize, -9223372036854775808).
        end : int
            boundary end, inclusive.
            The frame is unbounded if this is ``Window.unboundedFollowing``, or
            any value greater than or equal to min(sys.maxsize, 9223372036854775807).
        """
        if start <= Window._PRECEDING_THRESHOLD:
            start = Window.unboundedPreceding
        if end >= Window._FOLLOWING_THRESHOLD:
            end = Window.unboundedFollowing
        return WindowSpec(self._jspec.rowsBetween(start, end))
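    # Hedged usage sketch (comments only; assumes a DataFrame `df` with
    # columns "category" and "id", as in the doctests above):
    #
    #     spec = Window.partitionBy("category").orderBy("id")
    #     running = spec.rowsBetween(Window.unboundedPreceding, Window.currentRow)
    #     df.withColumn("running_sum", func.sum("id").over(running))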

 

    def rangeBetween(self, start, end):
        """
        Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive).

        Both `start` and `end` are relative to the current row. For example,
        "0" means "current row", while "-1" means one unit before the current
        row, and "5" means five units after the current row.

        We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
        and ``Window.currentRow`` to specify special boundary values, rather than using integral
        values directly.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        start : int
            boundary start, inclusive.
            The frame is unbounded if this is ``Window.unboundedPreceding``, or
            any value less than or equal to max(-sys.maxsize, -9223372036854775808).
        end : int
            boundary end, inclusive.
            The frame is unbounded if this is ``Window.unboundedFollowing``, or
            any value greater than or equal to min(sys.maxsize, 9223372036854775807).
        """
        if start <= Window._PRECEDING_THRESHOLD:
            start = Window.unboundedPreceding
        if end >= Window._FOLLOWING_THRESHOLD:
            end = Window.unboundedFollowing
        return WindowSpec(self._jspec.rangeBetween(start, end))
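    # Companion sketch to the rowsBetween example above (comments only; same
    # assumed `df` with a numeric "id" ordering, since bounded range frames
    # require a single numeric ORDER BY expression):
    #
    #     spec = Window.partitionBy("category").orderBy("id")
    #     near = spec.rangeBetween(-1, 1)
    #     df.withColumn("near_sum", func.sum("id").over(near))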

 

 

def _test():
    import doctest
    import pyspark.sql.window
    SparkContext('local[4]', 'PythonTest')
    globs = pyspark.sql.window.__dict__.copy()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.window, globs=globs,
        optionflags=doctest.NORMALIZE_WHITESPACE)
    if failure_count:
        sys.exit(-1)

 

 

if __name__ == "__main__":
    _test()