#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import unittest

from py4j.protocol import Py4JJavaError

from pyspark import shuffle, PickleSerializer, SparkConf, SparkContext
from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter
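

# MergerTests exercises ExternalMerger, PySpark's spillable map-side
# combiner: it aggregates (key, value) pairs in memory and spills partial
# results to disk once the given memory limit (in MB) is exceeded.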
class MergerTests(unittest.TestCase):
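
    # The aggregator builds list combiners: wrap a value in a list, append
    # a new value to a list, or extend one list with another.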
    def setUp(self):
        self.N = 1 << 12
        self.l = [i for i in range(self.N)]
        self.data = list(zip(self.l, self.l))
        self.agg = Aggregator(lambda x: [x],
                              lambda x, y: x.append(y) or x,
                              lambda x, y: x.extend(y) or x)
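
    # Small data under a generous limit: no spill may occur, and the merged
    # sums must equal the plain Python total.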
    def test_small_dataset(self):
        m = ExternalMerger(self.agg, 1000)
        m.mergeValues(self.data)
        self.assertEqual(m.spills, 0)
        self.assertEqual(sum(sum(v) for k, v in m.items()),
                         sum(range(self.N)))

        m = ExternalMerger(self.agg, 1000)
        m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))
        self.assertEqual(m.spills, 0)
        self.assertEqual(sum(sum(v) for k, v in m.items()),
                         sum(range(self.N)))
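
    # Tighter limits (20 MB, then 10 MB) force at least one spill, but the
    # aggregated totals must come out identical.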
    def test_medium_dataset(self):
        m = ExternalMerger(self.agg, 20)
        m.mergeValues(self.data)
        self.assertTrue(m.spills >= 1)
        self.assertEqual(sum(sum(v) for k, v in m.items()),
                         sum(range(self.N)))

        m = ExternalMerger(self.agg, 10)
        m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))
        self.assertTrue(m.spills >= 1)
        self.assertEqual(sum(sum(v) for k, v in m.items()),
                         sum(range(self.N)) * 3)
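
    # 5 MB across 3 partitions with 10 copies of the data: the values are
    # strings here, so correctness is checked by counting items rather than
    # summing them, and the spill files are cleaned up explicitly.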
    def test_huge_dataset(self):
        m = ExternalMerger(self.agg, 5, partitions=3)
        m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))
        self.assertTrue(m.spills >= 1)
        self.assertEqual(sum(len(v) for k, v in m.items()),
                         self.N * 10)
        m._cleanup()
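
    # GroupByKey must yield each key exactly once with its values in order,
    # including very large groups and results that survive a serializer
    # round trip.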
    def test_group_by_key(self):

        def gen_data(N, step):
            for i in range(1, N + 1, step):
                for j in range(i):
                    yield (i, [j])

        def gen_gs(N, step=1):
            return shuffle.GroupByKey(gen_data(N, step))

        self.assertEqual(1, len(list(gen_gs(1))))
        self.assertEqual(2, len(list(gen_gs(2))))
        self.assertEqual(100, len(list(gen_gs(100))))
        self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
        self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))

        for k, vs in gen_gs(50002, 10000):
            self.assertEqual(k, len(vs))
            self.assertEqual(list(range(k)), list(vs))

        ser = PickleSerializer()
        l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))
        for k, vs in l:
            self.assertEqual(k, len(vs))
            self.assertEqual(list(range(k)), list(vs))
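
    # A StopIteration raised inside a user-supplied aggregation function
    # must surface as a real error instead of silently ending the merge.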
    def test_stopiteration_is_raised(self):

        def stopit(*args, **kwargs):
            raise StopIteration()

        def legit_create_combiner(x):
            return [x]

        def legit_merge_value(x, y):
            return x.append(y) or x

        def legit_merge_combiners(x, y):
            return x.extend(y) or x

        data = [(x % 2, x) for x in range(100)]

        # wrong create combiner
        m = ExternalMerger(Aggregator(stopit, legit_merge_value, legit_merge_combiners), 20)
        with self.assertRaises((Py4JJavaError, RuntimeError)) as cm:
            m.mergeValues(data)

        # wrong merge value
        m = ExternalMerger(Aggregator(legit_create_combiner, stopit, legit_merge_combiners), 20)
        with self.assertRaises((Py4JJavaError, RuntimeError)) as cm:
            m.mergeValues(data)

        # wrong merge combiners
        m = ExternalMerger(Aggregator(legit_create_combiner, legit_merge_value, stopit), 20)
        with self.assertRaises((Py4JJavaError, RuntimeError)) as cm:
            m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), data))
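

# SorterTests exercises ExternalSorter, which sorts in memory when the data
# fits and falls back to merging sorted spill files on disk otherwise.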
class SorterTests(unittest.TestCase):
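
    # With a 1024 MB limit the 1024 ints fit in memory; results must match
    # the built-in sorted() for every key/reverse combination.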
    def test_in_memory_sort(self):
        l = list(range(1024))
        random.shuffle(l)
        sorter = ExternalSorter(1024)
        self.assertEqual(sorted(l), list(sorter.sorted(l)))
        self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
        self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
        self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
                         list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
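
    # CustomizedSorter pins _next_limit to the configured 1 MB instead of
    # letting the limit grow between spills, so every sort is forced to
    # spill; DiskBytesSpilled must grow after each pass.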
    def test_external_sort(self):
        class CustomizedSorter(ExternalSorter):
            def _next_limit(self):
                return self.memory_limit

        l = list(range(1024))
        random.shuffle(l)
        sorter = CustomizedSorter(1)
        self.assertEqual(sorted(l), list(sorter.sorted(l)))
        self.assertGreater(shuffle.DiskBytesSpilled, 0)
        last = shuffle.DiskBytesSpilled
        self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
        self.assertGreater(shuffle.DiskBytesSpilled, last)
        last = shuffle.DiskBytesSpilled
        self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
        self.assertGreater(shuffle.DiskBytesSpilled, last)
        last = shuffle.DiskBytesSpilled
        self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
                         list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
        self.assertGreater(shuffle.DiskBytesSpilled, last)
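
    # End-to-end check through a real SparkContext: with worker memory
    # capped at 1m, sortBy must still return a fully sorted result.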
    def test_external_sort_in_rdd(self):
        conf = SparkConf().set("spark.python.worker.memory", "1m")
        sc = SparkContext(conf=conf)
        l = list(range(10240))
        random.shuffle(l)
        rdd = sc.parallelize(l, 4)
        self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
        sc.stop()
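

# Re-export the tests and run them, using xmlrunner's XML reports when the
# package is available and plain unittest output otherwise.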
if __name__ == "__main__":
    from pyspark.tests.test_shuffle import *  # noqa: F401

    try:
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)