#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
@classmethod
def setUpClass(cls):
    # build the suite-wide temp-dir fixture that the tests below read from
    ...

@classmethod
def tearDownClass(cls):
    # remove the fixture once the suite finishes
    ...
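# --- Hedged sketch: a minimal, runnable version of the fixture pattern above.
# Only the setUpClass/tearDownClass shape comes from the excerpt; the class
# name, the tempfile/shutil mechanics, and the placeholder generator step are
# assumptions for illustration.
import os
import shutil
import tempfile
import unittest


class FixtureSketch(unittest.TestCase):  # hypothetical name

    @classmethod
    def setUpClass(cls):
        # Reserve a unique path, then delete the placeholder file so a data
        # generator can recreate that path as a directory of test files.
        tmp = tempfile.NamedTemporaryFile(delete=False)
        tmp.close()
        os.unlink(tmp.name)
        cls.basepath = tmp.name
        # ... generate SequenceFile test data under cls.basepath here ...

    @classmethod
    def tearDownClass(cls):
        # Remove whatever the generator wrote; ignore_errors in case nothing was.
        shutil.rmtree(cls.basepath, ignore_errors=True)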
"org.apache.hadoop.mapred.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text").collect())
"org.apache.hadoop.io.LongWritable", "org.apache.hadoop.io.Text", conf=oldconf).collect()
basepath + "/sftestdata/sfint/", "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text").collect())
"org.apache.hadoop.io.LongWritable", "org.apache.hadoop.io.Text", conf=newconf).collect()
basepath + "/sftestdata/sfint/", "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text"))
basepath + "/sftestdata/sfint/", "org.apache.hadoop.mapred.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text"))
basepath + "/sftestdata/sfint/", "org.apache.hadoop.io.NotValidWritable", "org.apache.hadoop.io.Text")) basepath + "/sftestdata/sfint/", "org.apache.hadoop.mapred.NotValidInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text")) basepath + "/sftestdata/sfint/", "org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text"))
# use of custom converters
maps = sorted(self.sc.sequenceFile(
    basepath + "/sftestdata/sfmap/",
    "org.apache.hadoop.io.IntWritable",
    "org.apache.hadoop.io.MapWritable",
    keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
    valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
# expected converted pairs, as listed in the excerpt
em = [(u'\x01', [3.0]),
      (u'\x02', [1.0]),
      (u'\x02', [1.0]),
      (u'\x03', [2.0])]
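# --- Hedged sketch: keyConverter/valueConverter name JVM-side classes (they
# must implement org.apache.spark.api.python.Converter) that rewrite each
# Writable before it is pickled for Python. The Test*Converter classes come
# from Spark's test support jars; the context and path are assumptions.
from pyspark import SparkContext

sc = SparkContext("local", "converter-read")
maps = sc.sequenceFile(
    "/tmp/sftestdata/sfmap/",                                         # assumed path
    "org.apache.hadoop.io.IntWritable",
    "org.apache.hadoop.io.MapWritable",
    keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
    valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect()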
(1, {"row1": 1.0}), (2, {"row2": 2.0})] basepath + "/oldhadoop/", "org.apache.hadoop.mapred.SequenceFileOutputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.MapWritable") basepath + "/oldhadoop/", "org.apache.hadoop.mapred.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.MapWritable").collect()
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat", "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable", "mapreduce.job.output.value.class": "org.apache.hadoop.io.MapWritable", "mapreduce.output.fileoutputformat.outputdir": basepath + "/olddataset/" } "org.apache.hadoop.mapred.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.MapWritable", conf=input_conf).collect()
(1, "a"), (2, "bcdf")] basepath + "/newhadoop/", "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text") basepath + "/newhadoop/", "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text").collect())
"mapreduce.job.outputformat.class": "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat", "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable", "mapreduce.job.output.value.class": "org.apache.hadoop.io.Text", "mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/" } "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text", conf=input_conf).collect())
basepath + "/newolderror/saveAsHadoopFile/", "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")) basepath + "/newolderror/saveAsNewAPIHadoopFile/", "org.apache.hadoop.mapred.SequenceFileOutputFormat"))
basepath + "/badinputs/saveAsHadoopFile/", "org.apache.hadoop.mapred.NotValidOutputFormat")) basepath + "/badinputs/saveAsNewAPIHadoopFile/", "org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
# use of custom converters
data = [(2, {1.0: u'aa'}),
        (3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
    basepath + "/converters/",
    "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
    keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
    valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
# converted pairs expected on readback, as listed in the excerpt
expected = [(u'2', 1.0),
            (u'3', 2.0)]
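# --- Hedged sketch: output converters mirror the input ones; the JVM-side
# classes named by fully qualified name rewrite each key/value before the
# Writable is written. Context and path are assumptions; the Test*Converter
# classes come from Spark's test support jars.
from pyspark import SparkContext

sc = SparkContext("local", "converter-write")
sc.parallelize([(2, {1.0: u'aa'}), (3, {2.0: u'dd'})]).saveAsNewAPIHadoopFile(
    "/tmp/converters/",                                               # assumed path
    "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
    keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
    valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
# with no key/value classes given, sequenceFile infers them when reading back
roundtrip = sorted(sc.sequenceFile("/tmp/converters/").collect())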
basepath + "/reserialize/hadoop", "org.apache.hadoop.mapred.SequenceFileOutputFormat")
basepath + "/reserialize/newhadoop", "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat", "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable", "mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable", "mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/dataset"}
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat", "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable", "mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable", "mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/newdataset" }
# non-batch-serialized RDD[[(K, V)]] should be rejected
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
    basepath + "/malformed/sequence"))
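# --- Hedged sketch: saveAsSequenceFile needs an RDD of (key, value) pairs; an
# RDD whose elements are *lists* of pairs is malformed and should raise rather
# than silently write garbage. Context and path are assumptions.
from pyspark import SparkContext

sc = SparkContext("local", "malformed")
nested = sc.parallelize([[(1, "a")], [(2, "aa")], [(3, "aaa")]], 3)
try:
    nested.saveAsSequenceFile("/tmp/malformed/sequence")              # assumed path
except Exception as e:
    print("rejected as expected:", e)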