#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
Python package for feature engineering in MLlib. """
'HashingTF', 'IDFModel', 'IDF', 'Word2Vec', 'Word2VecModel', 'ChiSqSelector', 'ChiSqSelectorModel', 'ElementwiseProduct']
""" Base class for transformation of a vector or RDD of vector """ """ Applies transformation on a vector.
Parameters ---------- vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD` vector (or convertible) or RDD of vectors to be transformed. """ raise NotImplementedError
r""" Normalizes samples individually to unit L\ :sup:`p`\ norm
For any 1 <= `p` < float('inf'), normalizes samples using sum(abs(vector) :sup:`p`) :sup:`(1/p)` as norm.
For `p` = float('inf'), max(abs(vector)) will be used as norm for normalization.
.. versionadded:: 1.2.0
Parameters ---------- p : float, optional Normalization in L^p^ space, p = 2 by default.
Examples
--------
>>> from pyspark.mllib.linalg import Vectors
>>> v = Vectors.dense(range(3))
>>> nor = Normalizer(1)
>>> nor.transform(v)
DenseVector([0.0, 0.3333, 0.6667])

>>> rdd = sc.parallelize([v])
>>> nor.transform(rdd).collect()
[DenseVector([0.0, 0.3333, 0.6667])]

>>> nor2 = Normalizer(float("inf"))
>>> nor2.transform(v)
DenseVector([0.0, 0.5, 1.0])
"""
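The normalization above is easy to reproduce outside Spark. A rough NumPy sketch of the L^p norm computation (the helper name `p_normalize` is illustrative, not part of the MLlib API):

import numpy as np

def p_normalize(v, p=2.0):
    # Divide v by its L^p norm; for p = inf use the maximum absolute value.
    v = np.asarray(v, dtype=float)
    if np.isinf(p):
        norm = np.max(np.abs(v))
    else:
        norm = np.sum(np.abs(v) ** p) ** (1.0 / p)
    # Mirror Normalizer's behaviour: a zero-norm vector is returned unchanged.
    return v if norm == 0.0 else v / norm

p_normalize([0.0, 1.0, 2.0], p=1.0)     # array([0., 0.3333..., 0.6667...])
p_normalize([0.0, 1.0, 2.0], p=np.inf)  # array([0., 0.5, 1.])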
""" Applies unit length normalization on a vector.
.. versionadded:: 1.2.0
Parameters ---------- vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD` vector or RDD of vector to be normalized.
Returns ------- :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD` normalized vector(s). If the norm of the input is zero, it will return the input vector. """ else:
""" Wrapper for the model in JVM """
""" Applies transformation on a vector or an RDD[Vector].
Parameters ---------- vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD` Input vector(s) to be transformed.
Notes ----- In Python, transform cannot currently be used within an RDD transformation or action. Call transform directly on the RDD instead. """ else:
""" Represents a StandardScaler model that can transform vectors.
.. versionadded:: 1.2.0 """
""" Applies standardization transformation on a vector.
.. versionadded:: 1.2.0
Parameters ---------- vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD` Input vector(s) to be standardized.
Returns ------- :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD` Standardized vector(s). If the variance of a column is zero, it will return default `0.0` for the column with zero variance.
Notes ----- In Python, transform cannot currently be used within an RDD transformation or action. Call transform directly on the RDD instead. """
def setWithMean(self, withMean): """ Sets whether the model centers the data with the mean before scaling. """
def setWithStd(self, withStd): """ Sets whether the model scales the data to unit standard deviation. """
def withStd(self): """ Returns if the model scales the data to unit standard deviation. """
def withMean(self): """ Returns if the model centers the data before scaling. """
def std(self): """ Return the column standard deviation values. """
def mean(self): """ Return the column mean values. """
""" Standardizes features by removing the mean and scaling to unit variance using column summary statistics on the samples in the training set.
.. versionadded:: 1.2.0
Parameters
----------
withMean : bool, optional
    False by default. Centers the data with the mean before scaling. It will
    build a dense output, so take care when applying to sparse input.
withStd : bool, optional
    True by default. Scales the data to unit standard deviation.
Examples
--------
>>> vs = [Vectors.dense([-2.0, 2.3, 0]), Vectors.dense([3.8, 0.0, 1.9])]
>>> dataset = sc.parallelize(vs)
>>> standardizer = StandardScaler(True, True)
>>> model = standardizer.fit(dataset)
>>> result = model.transform(dataset)
>>> for r in result.collect(): r
DenseVector([-0.7071, 0.7071, -0.7071])
DenseVector([0.7071, -0.7071, 0.7071])
>>> int(model.std[0])
4
>>> int(model.mean[0]*10)
9
>>> model.withStd
True
>>> model.withMean
True
"""
    warnings.warn("Both withMean and withStd are false. The model does nothing.")
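For intuition, the fitted model applies the usual column-wise standardization `(x - mean) / std`. A rough NumPy equivalent of the doctest above (a sketch only, not the MLlib code path; the corrected sample standard deviation is used, which is what makes `std[0]` come out near 4.1):

import numpy as np

X = np.array([[-2.0, 2.3, 0.0],
              [ 3.8, 0.0, 1.9]])

mean = X.mean(axis=0)           # column means, subtracted when withMean=True
std = X.std(axis=0, ddof=1)     # sample standard deviation, divided out when withStd=True
print((X - mean) / std)
# approximately [[-0.7071  0.7071 -0.7071]
#                [ 0.7071 -0.7071  0.7071]]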
""" Computes the mean and variance and stores as a model to be used for later scaling.
.. versionadded:: 1.2.0
Parameters ---------- dataset : :py:class:`pyspark.RDD` The data used to compute the mean and variance to build the transformation model.
Returns ------- :py:class:`StandardScalerModel` """
""" Represents a Chi Squared selector model.
.. versionadded:: 1.4.0 """
""" Applies transformation on a vector.
.. versionadded:: 1.4.0
Parameters ---------- vector : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD` Input vector(s) to be transformed.
Returns ------- :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD` transformed vector(s). """
""" Creates a ChiSquared feature selector. The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`, `fdr`, `fwe`.
* `numTopFeatures` chooses a fixed number of top features according to a chi-squared test.
* `percentile` is similar but chooses a fraction of all features instead of a fixed number.
* `fpr` chooses all features whose p-values are below a threshold, thus controlling the false positive rate of selection.
* `fdr` uses the `Benjamini-Hochberg procedure <https://en.wikipedia.org/wiki/False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_ to choose all features whose false discovery rate is below a threshold.
* `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by 1/numFeatures, thus controlling the family-wise error rate of selection.
By default, the selection method is `numTopFeatures`, with the default number of top features set to 50.
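A conceptual sketch of how the p-value based methods above pick feature indices (illustrative only, not Spark's implementation; it assumes the per-feature chi-squared p-values have already been computed):

def select_indices(pvalues, method="fpr", threshold=0.05):
    # pvalues[i] is the chi-squared test p-value of feature i.
    n = len(pvalues)
    if method == "fpr":
        # keep every feature whose p-value is below the threshold
        return [i for i, p in enumerate(pvalues) if p < threshold]
    if method == "fwe":
        # same rule, but the threshold is scaled by 1/numFeatures
        return [i for i, p in enumerate(pvalues) if p < threshold / n]
    if method == "fdr":
        # Benjamini-Hochberg: largest k with p_(k) <= threshold * k / n,
        # then keep the k features with the smallest p-values
        order = sorted(range(n), key=lambda i: pvalues[i])
        k = max((j + 1 for j in range(n)
                 if pvalues[order[j]] <= threshold * (j + 1) / n), default=0)
        return sorted(order[:k])
    raise ValueError("unknown method: %s" % method)

select_indices([0.01, 0.2, 0.03], method="fpr")  # [0, 2]
select_indices([0.01, 0.2, 0.03], method="fwe")  # [0]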
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.mllib.linalg import SparseVector, DenseVector
>>> from pyspark.mllib.regression import LabeledPoint
>>> data = sc.parallelize([
...     LabeledPoint(0.0, SparseVector(3, {0: 8.0, 1: 7.0})),
...     LabeledPoint(1.0, SparseVector(3, {1: 9.0, 2: 6.0})),
...     LabeledPoint(1.0, [0.0, 9.0, 8.0]),
...     LabeledPoint(2.0, [7.0, 9.0, 5.0]),
...     LabeledPoint(2.0, [8.0, 7.0, 3.0])
... ])
>>> model = ChiSqSelector(numTopFeatures=1).fit(data)
>>> model.transform(SparseVector(3, {1: 9.0, 2: 6.0}))
SparseVector(1, {})
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
>>> model = ChiSqSelector(selectorType="fpr", fpr=0.2).fit(data)
>>> model.transform(SparseVector(3, {1: 9.0, 2: 6.0}))
SparseVector(1, {})
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
>>> model = ChiSqSelector(selectorType="percentile", percentile=0.34).fit(data)
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
"""
    fdr=0.05, fwe=0.05):
def setNumTopFeatures(self, numTopFeatures): """ set numTopFeature for feature selection by number of top features. Only applicable when selectorType = "numTopFeatures". """ self.numTopFeatures = int(numTopFeatures) return self
def setPercentile(self, percentile): """ set percentile [0.0, 1.0] for feature selection by percentile. Only applicable when selectorType = "percentile". """ self.percentile = float(percentile) return self
def setFpr(self, fpr): """ set FPR [0.0, 1.0] for feature selection by FPR. Only applicable when selectorType = "fpr". """ self.fpr = float(fpr) return self
def setFdr(self, fdr): """ set FDR [0.0, 1.0] for feature selection by FDR. Only applicable when selectorType = "fdr". """ self.fdr = float(fdr) return self
def setFwe(self, fwe): """ set FWE [0.0, 1.0] for feature selection by FWE. Only applicable when selectorType = "fwe". """ self.fwe = float(fwe) return self
def setSelectorType(self, selectorType): """ set the selector type of the ChiSqSelector. Supported options: "numTopFeatures" (default), "percentile", "fpr", "fdr", "fwe". """ self.selectorType = str(selectorType) return self
""" Returns a ChiSquared feature selector.
.. versionadded:: 1.4.0
Parameters ---------- data : :py:class:`pyspark.RDD` of :py:class:`pyspark.mllib.regression.LabeledPoint` containing the labeled dataset with categorical features. Real-valued features will be treated as categorical for each distinct value. Apply feature discretizer before using this function. """ self.percentile, self.fpr, self.fdr, self.fwe, data)
""" Model fitted by [[PCA]] that can project vectors to a low-dimensional space using PCA.
.. versionadded:: 1.5.0 """
""" A feature transformer that projects vectors to a low-dimensional space using PCA.
.. versionadded:: 1.5.0
Examples
--------
>>> data = [Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),
...     Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),
...     Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0])]
>>> model = PCA(2).fit(sc.parallelize(data))
>>> pcArray = model.transform(Vectors.sparse(5, [(1, 1.0), (3, 7.0)])).toArray()
>>> pcArray[0]
1.648...
>>> pcArray[1]
-4.013...
"""

"""
Parameters
----------
k : int
    number of principal components.
"""
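As a rough illustration of what the projection does (not MLlib's distributed implementation, and the signs of the components may differ), the same idea in NumPy: take the top-`k` eigenvectors of the covariance matrix and project the rows onto them:

import numpy as np

def pca_project(X, k):
    # Illustrative PCA projection onto the top-k principal components.
    X = np.asarray(X, dtype=float)
    centered = X - X.mean(axis=0)              # center each column
    cov = np.cov(centered, rowvar=False)       # sample covariance matrix
    _, eigvecs = np.linalg.eigh(cov)           # eigh returns ascending eigenvalues
    components = eigvecs[:, ::-1][:, :k]       # top-k directions
    return X @ components

data = [[0.0, 1.0, 0.0, 7.0, 0.0],
        [2.0, 0.0, 3.0, 4.0, 5.0],
        [4.0, 0.0, 0.0, 6.0, 7.0]]
pca_project(data, k=2).shape                   # (3, 2)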
""" Computes a [[PCAModel]] that contains the principal components of the input vectors.
.. versionadded:: 1.5.0
Parameters ---------- data : :py:class:`pyspark.RDD` source vectors """
""" Maps a sequence of terms to their term frequencies using the hashing trick.
.. versionadded:: 1.2.0
Parameters ---------- numFeatures : int, optional number of features (default: 2^20)
Notes ----- The terms must be hashable (can not be dict/set/list...).
Examples
--------
>>> htf = HashingTF(100)
>>> doc = "a a b b c d".split(" ")
>>> htf.transform(doc)
SparseVector(100, {...})
"""
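The hashing trick itself is simple: each term is hashed to a column index modulo `numFeatures` and its count is accumulated at that index. A simplified pure-Python sketch (the helper below is illustrative; the hash MLlib uses may differ, so the indices will not necessarily match `HashingTF`):

from collections import defaultdict

def hashing_tf(terms, num_features=100, binary=False):
    # Map a list of terms to a sparse {index: count} term-frequency dict.
    freq = defaultdict(float)
    for term in terms:
        index = hash(term) % num_features      # hashing trick: term -> column index
        freq[index] = 1.0 if binary else freq[index] + 1.0
    return dict(freq)

hashing_tf("a a b b c d".split(" "))
# e.g. {<index of 'a'>: 2.0, <index of 'b'>: 2.0, <index of 'c'>: 1.0, <index of 'd'>: 1.0}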
def setBinary(self, value): """ If True, term frequency vector will be binary such that non-zero term counts will be set to 1 (default: False) """
def indexOf(self, term): """ Returns the index of the input term. """
def transform(self, document): """ Transforms the input document (list of terms) to a term frequency vector, or transforms an RDD of documents to an RDD of term frequency vectors. """ return document.map(self.transform)
""" Represents an IDF model that can transform term frequency vectors.
.. versionadded:: 1.2.0 """
""" Transforms term frequency (TF) vectors to TF-IDF vectors.
If `minDocFreq` was set for the IDF calculation, the terms which occur in fewer than `minDocFreq` documents will have an entry of 0.
.. versionadded:: 1.2.0
Parameters ---------- x : :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD` an RDD of term frequency vectors or a term frequency vector
Returns ------- :py:class:`pyspark.mllib.linalg.Vector` or :py:class:`pyspark.RDD` an RDD of TF-IDF vectors or a TF-IDF vector
Notes ----- In Python, transform cannot currently be used within an RDD transformation or action. Call transform directly on the RDD instead. """
def idf(self): """ Returns the current IDF vector. """
def docFreq(self): """ Returns the document frequency. """ return self.call('docFreq')
def numDocs(self): """ Returns the number of documents evaluated to compute the IDF. """ return self.call('numDocs')
""" Inverse document frequency (IDF).
The standard formulation is used: `idf = log((m + 1) / (d(t) + 1))`, where `m` is the total number of documents and `d(t)` is the number of documents that contain term `t`.
This implementation supports filtering out terms which do not appear in a minimum number of documents (controlled by the variable `minDocFreq`). For terms that are not in at least `minDocFreq` documents, the IDF is set to 0, resulting in TF-IDF values of 0.
.. versionadded:: 1.2.0
Parameters ---------- minDocFreq : int minimum number of documents in which a term must appear for filtering
Examples
--------
>>> n = 4
>>> freqs = [Vectors.sparse(n, (1, 3), (1.0, 2.0)),
...     Vectors.dense([0.0, 1.0, 2.0, 3.0]),
...     Vectors.sparse(n, [1], [1.0])]
>>> data = sc.parallelize(freqs)
>>> idf = IDF()
>>> model = idf.fit(data)
>>> tfidf = model.transform(data)
>>> for r in tfidf.collect(): r
SparseVector(4, {1: 0.0, 3: 0.5754})
DenseVector([0.0, 0.0, 1.3863, 0.863])
SparseVector(4, {1: 0.0})
>>> model.transform(Vectors.dense([0.0, 1.0, 2.0, 3.0]))
DenseVector([0.0, 0.0, 1.3863, 0.863])
>>> model.transform([0.0, 1.0, 2.0, 3.0])
DenseVector([0.0, 0.0, 1.3863, 0.863])
>>> model.transform(Vectors.sparse(n, (1, 3), (1.0, 2.0)))
SparseVector(4, {1: 0.0, 3: 0.5754})
"""
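The weighting is easy to reproduce from the formula above. A small sketch of the IDF computation with the `minDocFreq` filter, using plain Python lists as term-frequency vectors (illustrative only, not the MLlib code path):

import math

def idf_vector(tf_vectors, min_doc_freq=0):
    # idf(t) = log((m + 1) / (d(t) + 1)), zeroed out when d(t) < min_doc_freq.
    m = len(tf_vectors)                                  # total number of documents
    num_terms = len(tf_vectors[0])
    df = [sum(1 for doc in tf_vectors if doc[t] > 0)     # document frequency d(t)
          for t in range(num_terms)]
    return [math.log((m + 1.0) / (df[t] + 1.0)) if df[t] >= min_doc_freq else 0.0
            for t in range(num_terms)]

docs = [[0.0, 1.0, 0.0, 2.0],
        [0.0, 1.0, 2.0, 3.0],
        [0.0, 1.0, 0.0, 0.0]]
idf_vector(docs)   # [1.3862..., 0.0, 0.6931..., 0.2876...]

Multiplying a document's term-frequency vector elementwise by this IDF vector gives the TF-IDF values shown in the doctest above.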
""" Computes the inverse document frequency.
.. versionadded:: 1.2.0
Parameters ---------- dataset : :py:class:`pyspark.RDD` an RDD of term frequency vectors """ raise TypeError("dataset should be an RDD of term frequency vectors")
""" class for Word2Vec model """
""" Transforms a word to its vector representation
.. versionadded:: 1.2.0
Parameters ---------- word : str a word
Returns ------- :py:class:`pyspark.mllib.linalg.Vector` vector representation of word(s)
Notes ----- Local use only """ except Py4JJavaError: raise ValueError("%s not found" % word)
""" Find synonyms of a word
.. versionadded:: 1.2.0
Parameters ----------
word : str or :py:class:`pyspark.mllib.linalg.Vector` a word or a vector representation of word num : int number of synonyms to find
Returns ------- :py:class:`collections.abc.Iterable` array of (word, cosineSimilarity)
Notes ----- Local use only """
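The cosineSimilarity reported alongside each synonym is the standard cosine of the angle between the word vectors; for reference (a sketch only; the ranking itself happens on the JVM side):

import numpy as np

def cosine_similarity(a, b):
    # cos(a, b) = a . b / (||a|| * ||b||)
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

cosine_similarity([1.0, 0.0], [1.0, 1.0])   # 0.7071...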
def getVectors(self): """ Returns a map of words to their vector representations. """
def load(cls, sc, path): """ Load a model from the given path. """ .Word2VecModel.load(sc._jsc.sc(), path)
"""Word2Vec creates vector representation of words in a text corpus. The algorithm first constructs a vocabulary from the corpus and then learns vector representation of words in the vocabulary. The vector representation can be used as features in natural language processing and machine learning algorithms.
We use the skip-gram model in our implementation and hierarchical softmax to train it. The variable names in the implementation match the original C implementation.
For original C implementation, see https://code.google.com/p/word2vec/ For research papers, see Efficient Estimation of Word Representations in Vector Space and Distributed Representations of Words and Phrases and their Compositionality.
.. versionadded:: 1.2.0
Examples
--------
>>> sentence = "a b " * 100 + "a c " * 10
>>> localDoc = [sentence, sentence]
>>> doc = sc.parallelize(localDoc).map(lambda line: line.split(" "))
>>> model = Word2Vec().setVectorSize(10).setSeed(42).fit(doc)
Querying for synonyms of a word will not return that word:
>>> syms = model.findSynonyms("a", 2)
>>> [s[0] for s in syms]
['b', 'c']
But querying for synonyms of a vector may return the word whose representation is that vector:
>>> vec = model.transform("a")
>>> syms = model.findSynonyms(vec, 2)
>>> [s[0] for s in syms]
['a', 'b']
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = Word2VecModel.load(sc, path)
>>> model.transform("a") == sameModel.transform("a")
True
>>> syms = sameModel.findSynonyms("a", 2)
>>> [s[0] for s in syms]
['b', 'c']
>>> from shutil import rmtree
>>> try:
...     rmtree(path)
... except OSError:
...     pass
"""

"""
Construct Word2Vec instance
"""
def setVectorSize(self, vectorSize): """ Sets vector size (default: 100). """
def setLearningRate(self, learningRate): """ Sets initial learning rate (default: 0.025). """
def setNumPartitions(self, numPartitions): """ Sets number of partitions (default: 1). Use a small number for accuracy. """
def setNumIterations(self, numIterations): """ Sets number of iterations (default: 1), which should be smaller than or equal to number of partitions. """
def setSeed(self, seed): """ Sets random seed. """
def setMinCount(self, minCount): """ Sets minCount, the minimum number of times a token must appear to be included in the word2vec model's vocabulary (default: 5). """
def setWindowSize(self, windowSize): """ Sets window size (default: 5). """
""" Computes the vector representation of each word in vocabulary.
.. versionadded:: 1.2.0
Parameters ---------- data : :py:class:`pyspark.RDD` training data. RDD of list of string
Returns ------- :py:class:`Word2VecModel` """ raise TypeError("data should be an RDD of list of string") float(self.learningRate), int(self.numPartitions), int(self.numIterations), self.seed, int(self.minCount), int(self.windowSize))
""" Scales each column of the vector, with the supplied weight vector. i.e the elementwise product.
.. versionadded:: 1.5.0
Examples
--------
>>> weight = Vectors.dense([1.0, 2.0, 3.0])
>>> eprod = ElementwiseProduct(weight)
>>> a = Vectors.dense([2.0, 1.0, 3.0])
>>> eprod.transform(a)
DenseVector([2.0, 2.0, 9.0])
>>> b = Vectors.dense([9.0, 3.0, 4.0])
>>> rdd = sc.parallelize([a, b])
>>> eprod.transform(rdd).collect()
[DenseVector([2.0, 2.0, 9.0]), DenseVector([9.0, 6.0, 12.0])]
"""
def transform(self, vector): """ Computes the Hadamard product of the vector and the weight vector. """
else:
.master("local[4]")\
    .appName("mllib.feature tests")\
    .getOrCreate()
sys.exit(-1)