Home
Trees
Indices
Help
PySpark
[
frames
] | [
no frames
]
Identifier Index
[
A
B
C
D
E
F
G
H
I
J
K
L
M
N
O
P
Q
R
S
T
U
V
W X Y
Z
_
]
A
Accumulator
(in
pyspark.accumulators
)
add()
(in
Accumulator
)
addInPlace()
(in
AddingAccumulatorParam
)
accumulator()
(in
SparkContext
)
addFile()
(in
SparkContext
)
addPyFile()
(in
SparkContext
)
AccumulatorParam
(in
pyspark.accumulators
)
AddingAccumulatorParam
(in
pyspark.accumulators
)
ALS
(in
pyspark.mllib.recommendation
)
accumulators
(in
pyspark
)
addInPlace()
(in
AccumulatorParam
)
B
broadcast
(in
pyspark
)
Broadcast
(in
pyspark.broadcast
)
broadcast()
(in
SparkContext
)
C
cache()
(in
RDD
)
collect()
(in
RDD
)
context()
(in
RDD
)
cartesian()
(in
RDD
)
collectAsMap()
(in
RDD
)
copy()
(in
StatCounter
)
checkpoint()
(in
RDD
)
combineByKey()
(in
RDD
)
count()
(in
RDD
)
classification
(in
pyspark.mllib
)
COMPLEX_ACCUMULATOR_PARAM
(in
pyspark.accumulators
)
count()
(in
StatCounter
)
clearFiles()
(in
SparkContext
)
conf
(in
pyspark
)
countByKey()
(in
RDD
)
clustering
(in
pyspark.mllib
)
contains()
(in
SparkConf
)
countByValue()
(in
RDD
)
cogroup()
(in
RDD
)
context
(in
pyspark
)
D
defaultParallelism()
(in
SparkContext
)
DISK_ONLY_2
(in
StorageLevel
)
dumps
(in
MarshalSerializer
)
DISK_ONLY
(in
StorageLevel
)
distinct()
(in
RDD
)
dumps()
(in
PickleSerializer
)
F
files
(in
pyspark
)
flatMap()
(in
RDD
)
fold()
(in
RDD
)
filter()
(in
RDD
)
flatMapValues()
(in
RDD
)
foreach()
(in
RDD
)
first()
(in
RDD
)
FLOAT_ACCUMULATOR_PARAM
(in
pyspark.accumulators
)
G
get()
(in
SparkConf
)
getCheckpointFile()
(in
RDD
)
groupBy()
(in
RDD
)
get()
(in
SparkFiles
)
getRootDirectory()
(in
SparkFiles
)
groupByKey()
(in
RDD
)
getAll()
(in
SparkConf
)
glom()
(in
RDD
)
groupWith()
(in
RDD
)
I
INT_ACCUMULATOR_PARAM
(in
pyspark.accumulators
)
isCheckpointed()
(in
RDD
)
J
join()
(in
RDD
)
K
keyBy()
(in
RDD
)
KMeans
(in
pyspark.mllib.clustering
)
KMeansModel
(in
pyspark.mllib.clustering
)
L
LassoModel
(in
pyspark.mllib.regression
)
LinearRegressionModel
(in
pyspark.mllib.regression
)
loads
(in
PickleSerializer
)
LassoWithSGD
(in
pyspark.mllib.regression
)
LinearRegressionModelBase
(in
pyspark.mllib.regression
)
LogisticRegressionModel
(in
pyspark.mllib.classification
)
leftOuterJoin()
(in
RDD
)
LinearRegressionWithSGD
(in
pyspark.mllib.regression
)
LogisticRegressionWithSGD
(in
pyspark.mllib.classification
)
LinearModel
(in
pyspark.mllib.regression
)
loads
(in
MarshalSerializer
)
M
map()
(in
RDD
)
mean()
(in
RDD
)
MEMORY_ONLY_2
(in
StorageLevel
)
mapPartitions()
(in
RDD
)
mean()
(in
StatCounter
)
MEMORY_ONLY_SER
(in
StorageLevel
)
mapPartitionsWithIndex()
(in
RDD
)
MEMORY_AND_DISK
(in
StorageLevel
)
MEMORY_ONLY_SER_2
(in
StorageLevel
)
mapPartitionsWithSplit()
(in
RDD
)
MEMORY_AND_DISK_2
(in
StorageLevel
)
merge()
(in
StatCounter
)
mapValues()
(in
RDD
)
MEMORY_AND_DISK_SER
(in
StorageLevel
)
mergeStats()
(in
StatCounter
)
MarshalSerializer
(in
pyspark.serializers
)
MEMORY_AND_DISK_SER_2
(in
StorageLevel
)
mllib
(in
pyspark
)
MatrixFactorizationModel
(in
pyspark.mllib.recommendation
)
MEMORY_ONLY
(in
StorageLevel
)
N
NaiveBayes
(in
pyspark.mllib.classification
)
NaiveBayesModel
(in
pyspark.mllib.classification
)
P
parallelize()
(in
SparkContext
)
pipe()
(in
RDD
)
predict()
(in
MatrixFactorizationModel
)
partitionBy()
(in
RDD
)
predict()
(in
LogisticRegressionModel
)
predict()
(in
LinearRegressionModelBase
)
persist()
(in
RDD
)
predict()
(in
NaiveBayesModel
)
predictAll()
(in
MatrixFactorizationModel
)
pickleSer
(in
pyspark.accumulators
)
predict()
(in
SVMModel
)
pyspark
PickleSerializer
(in
pyspark.serializers
)
predict()
(in
KMeansModel
)
R
rdd
(in
pyspark
)
reduceByKey()
(in
RDD
)
RidgeRegressionWithSGD
(in
pyspark.mllib.regression
)
RDD
(in
pyspark.rdd
)
reduceByKeyLocally()
(in
RDD
)
rightOuterJoin()
(in
RDD
)
recommendation
(in
pyspark.mllib
)
regression
(in
pyspark.mllib
)
reduce()
(in
RDD
)
RidgeRegressionModel
(in
pyspark.mllib.regression
)
S
sample()
(in
RDD
)
setExecutorEnv()
(in
SparkConf
)
stdev()
(in
RDD
)
sampleStdev()
(in
RDD
)
setMaster()
(in
SparkConf
)
stdev()
(in
StatCounter
)
sampleStdev()
(in
StatCounter
)
setSparkHome()
(in
SparkConf
)
stop()
(in
SparkContext
)
sampleVariance()
(in
RDD
)
setSystemProperty()
(in
SparkContext
)
storagelevel
(in
pyspark
)
sampleVariance()
(in
StatCounter
)
sortByKey()
(in
RDD
)
StorageLevel
(in
pyspark.storagelevel
)
saveAsTextFile()
(in
RDD
)
SparkConf
(in
pyspark.conf
)
subtract()
(in
RDD
)
serializers
(in
pyspark
)
SparkContext
(in
pyspark.context
)
subtractByKey()
(in
RDD
)
set()
(in
SparkConf
)
SparkFiles
(in
pyspark.files
)
sum()
(in
RDD
)
setAll()
(in
SparkConf
)
statcounter
(in
pyspark
)
sum()
(in
StatCounter
)
setAppName()
(in
SparkConf
)
StatCounter
(in
pyspark.statcounter
)
SVMModel
(in
pyspark.mllib.classification
)
setCheckpointDir()
(in
SparkContext
)
stats()
(in
RDD
)
SVMWithSGD
(in
pyspark.mllib.classification
)
T
take()
(in
RDD
)
train()
(in
NaiveBayes
)
train()
(in
LinearRegressionWithSGD
)
takeSample()
(in
RDD
)
train()
(in
SVMWithSGD
)
train()
(in
RidgeRegressionWithSGD
)
textFile()
(in
SparkContext
)
train()
(in
KMeans
)
trainImplicit()
(in
ALS
)
toDebugString()
(in
SparkConf
)
train()
(in
ALS
)
train()
(in
LogisticRegressionWithSGD
)
train()
(in
LassoWithSGD
)
U
union()
(in
SparkContext
)
union()
(in
RDD
)
unpersist()
(in
RDD
)
V
value()
(in
Accumulator
)
variance()
(in
RDD
)
variance()
(in
StatCounter
)
Z
zero()
(in
AccumulatorParam
)
zero()
(in
AddingAccumulatorParam
)
_
__add__()
(in
RDD
)
__init__()
(in
MatrixFactorizationModel
)
_active_spark_context
(in
SparkContext
)
__del__()
(in
SparkContext
)
__init__()
(in
LinearModel
)
_broadcastRegistry
(in
pyspark.broadcast
)
__del__()
(in
MatrixFactorizationModel
)
__init__()
(in
RDD
)
_gateway
(in
SparkContext
)
__iadd__()
(in
Accumulator
)
__init__()
(in
StatCounter
)
_is_running_on_worker
(in
SparkFiles
)
__init__()
(in
Accumulator
)
__init__()
(in
StorageLevel
)
_jvm
(in
SparkContext
)
__init__()
(in
AddingAccumulatorParam
)
__reduce__()
(in
Accumulator
)
_lock
(in
SparkContext
)
__init__()
(in
Broadcast
)
__reduce__()
(in
Broadcast
)
_next_accum_id
(in
SparkContext
)
__init__()
(in
SparkConf
)
__repr__()
(in
Accumulator
)
_python_includes
(in
SparkContext
)
__init__()
(in
SparkContext
)
__repr__()
(in
RDD
)
_root_directory
(in
SparkFiles
)
__init__()
(in
SparkFiles
)
__repr__()
(in
StatCounter
)
_sc
(in
SparkFiles
)
__init__()
(in
NaiveBayesModel
)
__str__()
(in
Accumulator
)
_spark_stack_depth
(in
pyspark.rdd
)
__init__()
(in
KMeansModel
)
_accumulatorRegistry
(in
pyspark.accumulators
)
_writeToFile
(in
SparkContext
)
Home
Trees
Indices
Help
PySpark
Generated by Epydoc 3.0.1 on Sun Mar 2 16:35:00 2014
http://epydoc.sourceforge.net