2 changes: 2 additions & 0 deletions build.sbt
@@ -54,6 +54,8 @@ libraryDependencies += "com.github.zafarkhaja" % "java-semver" % "0.9.0" % "test"
 
 parallelExecution := false
 
+scalacOptions ++= Seq("-deprecation", "-feature")
+
 scalacOptions in (Compile, doc) ++= Seq(
   "-groups",
   "-implicits",
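Note: -deprecation makes scalac report each deprecated call site in full instead of the one-line summary ("there were N deprecation warnings; re-run with -deprecation for details"), and -feature does the same for language features that require an explicit scala.language import. A minimal sketch of what -deprecation surfaces, using a hypothetical deprecated method (not from this repo):

    object DeprecationDemo {
      @deprecated("use greet() instead", since = "0.1.0")
      def hello(): Unit = println("hello")

      def main(args: Array[String]): Unit = {
        // With -deprecation, scalac reports this call site in full:
        // "method hello in object DeprecationDemo is deprecated: use greet() instead"
        hello()
      }
    }
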
20 changes: 12 additions & 8 deletions src/main/scala/org/graphframes/lib/ShortestPaths.scala
@@ -22,9 +22,10 @@ import java.util
 import scala.collection.JavaConverters._
 
 import org.apache.spark.graphx.{lib => graphxlib}
+import org.apache.spark.sql.{Column, DataFrame, Row}
+import org.apache.spark.sql.api.java.UDF1
 import org.apache.spark.sql.functions.{col, udf}
 import org.apache.spark.sql.types.{IntegerType, MapType}
-import org.apache.spark.sql.{Column, DataFrame, Row}
 
 import org.graphframes.GraphFrame

@@ -72,18 +73,21 @@ private object ShortestPaths {
     val g = GraphXConversions.fromGraphX(graph, gx, vertexNames = Seq(DISTANCE_ID))
     val distanceCol: Column = if (graph.hasIntegralIdType) {
       // It seems there is no easy way to convert a sequence of pairs into a map.
-      val mapToLandmark = udf({ distances: Seq[Row] =>
+      val mapToLandmark = udf { distances: Seq[Row] =>
         distances.map { case Row(k: Long, v: Int) =>
           k -> v
         }.toMap
-      }, MapType(idType, IntegerType, false))
+      }
       mapToLandmark(g.vertices(DISTANCE_ID))
     } else {
-      val mapToLandmark = udf({ distances: Seq[Row] =>
-        distances.map { case Row(k: Long, v: Int) =>
-          longIdToLandmark(k) -> v
-        }.toMap
-      }, MapType(idType, IntegerType, false))
+      val func = new UDF1[Seq[Row], Map[Any, Int]] {
+        override def call(t1: Seq[Row]): Map[Any, Int] = {
+          t1.map { case Row(k: Long, v: Int) =>
+            longIdToLandmark(k) -> v
+          }.toMap
+        }
+      }
+      val mapToLandmark = udf(func, MapType(idType, IntegerType, false))
       mapToLandmark(col(DISTANCE_ID))
     }
     val cols = graph.vertices.columns.map(col) :+ distanceCol.as(DISTANCE_ID)
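Note on the two shapes of udf here: the integral-ID branch switches to the typed overload, whose return schema (Map[Long, Int]) is inferred from the closure's type signature, so the explicit MapType argument is dropped. The non-integral branch still has to pass the return DataType explicitly, because idType is only known at runtime, and it now does so through the Java UDF1 overload rather than the untyped udf(AnyRef, DataType) one. A minimal, self-contained sketch contrasting the two styles (hypothetical demo code, assuming a Spark 2.x-style local SparkSession):

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.api.java.UDF1
    import org.apache.spark.sql.functions.{col, udf}
    import org.apache.spark.sql.types.{IntegerType, LongType, MapType}

    object UdfOverloadsDemo {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder()
          .master("local[*]").appName("udf-demo").getOrCreate()
        import spark.implicits._

        val df = Seq((1L, 10), (2L, 20)).toDF("k", "v")

        // Typed overload: the result schema is inferred from Long => Map[Long, Int],
        // so no explicit DataType is passed.
        val typed = udf { k: Long => Map(k -> 1) }

        // UDF1 overload: the result DataType is supplied explicitly, as needed
        // when the key type is only known at runtime.
        val withExplicitType = udf(new UDF1[Long, Map[Long, Int]] {
          override def call(k: Long): Map[Long, Int] = Map(k -> 1)
        }, MapType(LongType, IntegerType, false))

        df.select(typed(col("k")), withExplicitType(col("k"))).show()
        spark.stop()
      }
    }
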
2 changes: 1 addition & 1 deletion src/main/scala/org/graphframes/pattern/patterns.scala
@@ -40,7 +40,7 @@ private[graphframes] object PatternParser extends RegexParsers {
   private val edge: Parser[Edge] = namedEdge | anonymousEdge
   private val negatedEdge: Parser[Pattern] =
     "!" ~ edge ^^ {
-      case "!" ~ e => Negation(e)
+      case _ ~ e => Negation(e)
     }
   private val pattern: Parser[Pattern] = edge | vertex | negatedEdge
   val patterns: Parser[List[Pattern]] = repsep(pattern, ";")
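The "!" ~ edge parser can only succeed after the literal "!" has matched, so the left component of the resulting ~ is always "!"; re-matching it with a string literal pattern is redundant and can draw a match-may-not-be-exhaustive warning once compiler warnings are tightened. case _ ~ e behaves identically without the warning. A minimal sketch with a hypothetical mini-parser (not the GraphFrames grammar):

    import scala.util.parsing.combinator.RegexParsers

    object NegationDemo extends RegexParsers {
      val name: Parser[String] = """[a-z]+""".r
      // "!" ~ name produces a ~("!", <name>); the left side cannot differ
      // from "!", so the wildcard pattern is warning-free and equivalent.
      val negated: Parser[String] = "!" ~ name ^^ {
        case _ ~ n => s"not($n)"
      }

      def main(args: Array[String]): Unit = {
        println(parseAll(negated, "!ab")) // [1.4] parsed: not(ab)
      }
    }
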
2 changes: 2 additions & 0 deletions src/test/resources/log4j.properties
@@ -8,3 +8,5 @@ log4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR
 log4j.logger.org.apache.spark=WARN
 log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
 log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
+# Hide many "WARN CacheManager: Asked to cache already cached data" warnings.
+log4j.logger.org.apache.spark.sql.execution.CacheManager=ERROR