#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for feature engineering in MLlib.
"""
from __future__ import absolute_import
import sys
import warnings
import random
from py4j.protocol import Py4JJavaError
from pyspark import RDD, SparkContext
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Vectors, Vector, _convert_to_vector
__all__ = ['Normalizer', 'StandardScalerModel', 'StandardScaler',
           'HashingTF', 'IDFModel', 'IDF', 'Word2Vec', 'Word2VecModel']


class VectorTransformer(object):
    """
    .. note:: DeveloperApi

    Base class for transformation of a vector or an RDD of vectors.
    """
    def transform(self, vector):
        """
        Applies transformation on a vector.

        :param vector: vector to be transformed.
        """
        raise NotImplementedError
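
# A concrete transformer only needs to implement transform(); a minimal
# sketch (AbsoluteValue below is hypothetical, not part of this API):
#
#     class AbsoluteValue(VectorTransformer):
#         def transform(self, vector):
#             # element-wise absolute value of the input vector
#             v = _convert_to_vector(vector)
#             return Vectors.dense([abs(x) for x in v.toArray()])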


class Normalizer(VectorTransformer):
    """
    .. note:: Experimental

    Normalizes samples individually to unit L\ :sup:`p`\  norm.

    For any 1 <= `p` < float('inf'), normalizes samples using
    sum(abs(vector) :sup:`p`) :sup:`(1/p)` as norm.

    For `p` = float('inf'), max(abs(vector)) will be used as norm for
    normalization.
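
    For example, with `p` = 1 the vector [0.0, 1.0, 2.0] below has L1
    norm 0 + 1 + 2 = 3, so each entry is divided by 3:
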
    >>> v = Vectors.dense(range(3))
    >>> nor = Normalizer(1)
    >>> nor.transform(v)
    DenseVector([0.0, 0.3333, 0.6667])
    >>> rdd = sc.parallelize([v])
    >>> nor.transform(rdd).collect()
    [DenseVector([0.0, 0.3333, 0.6667])]
    >>> nor2 = Normalizer(float("inf"))
    >>> nor2.transform(v)
    DenseVector([0.0, 0.5, 1.0])
    """
    def __init__(self, p=2.0):
        """
        :param p: Normalization in L^p^ space, p = 2 by default.
        """
        assert p >= 1.0, "p should be greater than or equal to 1.0"
        self.p = float(p)

    def transform(self, vector):
        """
        Applies unit length normalization on a vector.

        :param vector: vector or RDD of vector to be normalized.
        :return: normalized vector. If the norm of the input is zero, it
                 will return the input vector.
        """
        sc = SparkContext._active_spark_context
        assert sc is not None, "SparkContext should be initialized first"
        if isinstance(vector, RDD):
            vector = vector.map(_convert_to_vector)
        else:
            vector = _convert_to_vector(vector)
        return callMLlibFunc("normalizeVector", self.p, vector)


class JavaVectorTransformer(JavaModelWrapper, VectorTransformer):
    """
    Wrapper for a transformation model stored in the JVM.
    """
    def transform(self, vector):
        """
        Applies the transformation to a vector or an RDD of vectors.

        :param vector: Vector or RDD of Vector to be transformed.
        """
        if isinstance(vector, RDD):
            vector = vector.map(_convert_to_vector)
        else:
            vector = _convert_to_vector(vector)
        return self.call("transform", vector)


class StandardScalerModel(JavaVectorTransformer):
    """
    .. note:: Experimental

    Represents a StandardScaler model that can transform vectors.
    """


class StandardScaler(object):
    """
    .. note:: Experimental

    Standardizes features by removing the mean and scaling to unit
    variance using column summary statistics on the samples in the
    training set.
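
    With both flags set, each column is transformed as
    (x - mean) / stddev, using the column's sample standard deviation
    (n - 1 denominator). In the example below the first column is
    [-2.0, 3.8]: mean 0.9, stddev ~4.101, so -2.0 maps to
    (-2.0 - 0.9) / 4.101 ~= -0.7071.
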
    >>> vs = [Vectors.dense([-2.0, 2.3, 0]), Vectors.dense([3.8, 0.0, 1.9])]
    >>> dataset = sc.parallelize(vs)
    >>> standardizer = StandardScaler(True, True)
    >>> model = standardizer.fit(dataset)
    >>> result = model.transform(dataset)
    >>> for r in result.collect(): r
    DenseVector([-0.7071, 0.7071, -0.7071])
    DenseVector([0.7071, -0.7071, 0.7071])
    """
    def __init__(self, withMean=False, withStd=True):
        """
        :param withMean: False by default. Centers the data with mean
                 before scaling. It will build a dense output, so this
                 does not work on sparse input and will raise an
                 exception.
        :param withStd: True by default. Scales the data to unit
                 standard deviation.
        """
        if not (withMean or withStd):
            warnings.warn("Both withMean and withStd are false. The model does nothing.")
        self.withMean = withMean
        self.withStd = withStd

    def fit(self, dataset):
        """
        Computes the mean and variance and stores as a model to be used
        for later scaling.

        :param dataset: The data used to compute the mean and variance
                 to build the transformation model.
        :return: a StandardScalerModel
        """
        dataset = dataset.map(_convert_to_vector)
        jmodel = callMLlibFunc("fitStandardScaler", self.withMean, self.withStd, dataset)
        return StandardScalerModel(jmodel)


class HashingTF(object):
    """
    .. note:: Experimental

    Maps a sequence of terms to their term frequencies using the hashing
    trick.

    Note: the terms must be hashable (cannot be dict/set/list...).
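
    Each term is mapped to index hash(term) % numFeatures (see
    `indexOf`), so distinct terms may collide into the same bucket;
    a larger `numFeatures` makes collisions less likely.
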
    >>> htf = HashingTF(100)
    >>> doc = "a a b b c d".split(" ")
    >>> htf.transform(doc)
    SparseVector(100, {1: 1.0, 14: 1.0, 31: 2.0, 44: 2.0})
    """
    def __init__(self, numFeatures=1 << 20):
        """
        :param numFeatures: number of features (default: 2^20)
        """
        self.numFeatures = numFeatures

    def indexOf(self, term):
        """ Returns the index of the input term. """
        return hash(term) % self.numFeatures

    def transform(self, document):
        """
        Transforms the input document (list of terms) to term frequency
        vectors, or transforms an RDD of documents to an RDD of term
        frequency vectors.
        """
        if isinstance(document, RDD):
            return document.map(self.transform)

        freq = {}
        for term in document:
            i = self.indexOf(term)
            freq[i] = freq.get(i, 0) + 1.0
        return Vectors.sparse(self.numFeatures, freq.items())


class IDFModel(JavaVectorTransformer):
    """
    Represents an IDF model that can transform term frequency vectors.
    """


class IDF(object):
    """
    .. note:: Experimental

    Inverse document frequency (IDF).

    The standard formulation is used: `idf = log((m + 1) / (d(t) + 1))`,
    where `m` is the total number of documents and `d(t)` is the number
    of documents that contain term `t`.

    This implementation supports filtering out terms which do not appear
    in a minimum number of documents (controlled by the variable
    `minDocFreq`). For terms that appear in fewer than `minDocFreq`
    documents, the IDF is set to 0, so their TF-IDF is also 0.
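
    For example, there are three documents below, so `m` = 3. Term 3
    appears in `d(t)` = 2 of them, giving idf = log(4 / 3) ~= 0.2877;
    its term frequency of 2.0 in the first document then yields the
    TF-IDF of ~0.5754 shown in the first output vector.
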
    >>> n = 4
    >>> freqs = [Vectors.sparse(n, (1, 3), (1.0, 2.0)),
    ...          Vectors.dense([0.0, 1.0, 2.0, 3.0]),
    ...          Vectors.sparse(n, [1], [1.0])]
    >>> data = sc.parallelize(freqs)
    >>> idf = IDF()
    >>> model = idf.fit(data)
    >>> tfidf = model.transform(data)
    >>> for r in tfidf.collect(): r
    SparseVector(4, {1: 0.0, 3: 0.5754})
    DenseVector([0.0, 0.0, 1.3863, 0.863])
    SparseVector(4, {1: 0.0})
    >>> model.transform(Vectors.dense([0.0, 1.0, 2.0, 3.0]))
    DenseVector([0.0, 0.0, 1.3863, 0.863])
    >>> model.transform([0.0, 1.0, 2.0, 3.0])
    DenseVector([0.0, 0.0, 1.3863, 0.863])
    >>> model.transform(Vectors.sparse(n, (1, 3), (1.0, 2.0)))
    SparseVector(4, {1: 0.0, 3: 0.5754})
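
    In practice the term frequency vectors are usually produced by
    `HashingTF`; a minimal sketch, assuming `docs` is an RDD of token
    lists::

        tf = HashingTF().transform(docs)
        tf.cache()  # fit() and transform() each traverse the data
        tfidf = IDF().fit(tf).transform(tf)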
    """
    def __init__(self, minDocFreq=0):
        """
        :param minDocFreq: minimum number of documents in which a term
                           should appear for filtering (default: 0)
        """
        self.minDocFreq = minDocFreq

    def fit(self, dataset):
        """
        Computes the inverse document frequency.

        :param dataset: an RDD of term frequency vectors
        """
        if not isinstance(dataset, RDD):
            raise TypeError("dataset should be an RDD of term frequency vectors")
        jmodel = callMLlibFunc("fitIDF", self.minDocFreq, dataset.map(_convert_to_vector))
        return IDFModel(jmodel)


class Word2VecModel(JavaVectorTransformer):
    """
    Class for Word2Vec model.
    """
    def transform(self, word):
        """
        Transforms a word to its vector representation.

        Note: local use only

        :param word: a word
        :return: vector representation of word(s)
        """
        try:
            return self.call("transform", word)
        except Py4JJavaError:
            raise ValueError("%s not found" % word)

    def findSynonyms(self, word, num):
        """
        Find synonyms of a word.

        :param word: a word or a vector representation of word
        :param num: number of synonyms to find
        :return: array of (word, cosineSimilarity)

        Note: local use only
        """
        if not isinstance(word, basestring):
            word = _convert_to_vector(word)
        words, similarity = self.call("findSynonyms", word, num)
        return zip(words, similarity)


class Word2Vec(object):
    """
    Word2Vec creates vector representations of words in a text corpus.
    The algorithm first constructs a vocabulary from the corpus and then
    learns vector representations of the words in the vocabulary. These
    vector representations can be used as features in natural language
    processing and machine learning algorithms.

    We used the skip-gram model in our implementation, with hierarchical
    softmax to train the model. The variable names in the implementation
    match the original C implementation.

    For the original C implementation,
    see https://code.google.com/p/word2vec/
    For research papers, see
    Efficient Estimation of Word Representations in Vector Space
    and Distributed Representations of Words and Phrases and their
    Compositionality.
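
    A typical run tokenizes an RDD of text and chains the setters; a
    sketch, assuming a whitespace-separated file `corpus.txt`
    (hypothetical path)::

        doc = sc.textFile("corpus.txt").map(lambda line: line.split(" "))
        model = Word2Vec().setVectorSize(50).setSeed(42L).fit(doc)
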
    >>> sentence = "a b " * 100 + "a c " * 10
    >>> localDoc = [sentence, sentence]
    >>> doc = sc.parallelize(localDoc).map(lambda line: line.split(" "))
    >>> model = Word2Vec().setVectorSize(10).setSeed(42L).fit(doc)
    >>> syms = model.findSynonyms("a", 2)
    >>> [s[0] for s in syms]
    [u'b', u'c']
    >>> vec = model.transform("a")
    >>> syms = model.findSynonyms(vec, 2)
    >>> [s[0] for s in syms]
    [u'b', u'c']
    """
    def __init__(self):
        """
        Construct Word2Vec instance
        """
        self.vectorSize = 100
        self.learningRate = 0.025
        self.numPartitions = 1
        self.numIterations = 1
        self.seed = random.randint(0, sys.maxint)

    def setVectorSize(self, vectorSize):
        """
        Sets vector size (default: 100).
        """
        self.vectorSize = vectorSize
        return self
 
    def setLearningRate(self, learningRate):
        """
        Sets initial learning rate (default: 0.025).
        """
        self.learningRate = learningRate
        return self
 
    def setNumPartitions(self, numPartitions):
        """
        Sets number of partitions (default: 1). Use a small number for
        accuracy.
        """
        self.numPartitions = numPartitions
        return self
 
    def setNumIterations(self, numIterations):
        """
        Sets number of iterations (default: 1), which should be smaller
        than or equal to number of partitions.
        """
        self.numIterations = numIterations
        return self
 
    def setSeed(self, seed):
        """
        Sets random seed.
        """
        self.seed = seed
        return self
 
    def fit(self, data):
        """
        Computes the vector representation of each word in vocabulary.

        :param data: training data. RDD of list of string
        :return: Word2VecModel instance
        """
        if not isinstance(data, RDD):
            raise TypeError("data should be an RDD of list of string")
        jmodel = callMLlibFunc("trainWord2Vec", data, int(self.vectorSize),
                               float(self.learningRate), int(self.numPartitions),
                               int(self.numIterations), long(self.seed))
        return Word2VecModel(jmodel)


def _test():
    import doctest
    from pyspark import SparkContext
    globs = globals().copy()
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        exit(-1)


if __name__ == "__main__":
    # remove the script directory from sys.path so that local modules
    # (e.g. random) do not shadow stdlib ones during the doctest run
    sys.path.pop(0)
    _test()