Reading GeoMesa (HBase) Data with Spark
package com.grady.geomesa

import org.apache.hadoop.conf.Configuration
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.geotools.data.Query
import org.locationtech.geomesa.spark.{GeoMesaSpark, GeoMesaSparkKryoRegistrator, SpatialRDD}
import org.locationtech.geomesa.spark.jts._

// implicitly converts the Scala Map below to the java.util.Map that GeoMesaSpark(params) expects
import scala.collection.JavaConversions._

object SparkReadGeomesa {

  val GeomesaCatalog = "gradytest"
  val GeomesaCatalogFeature = "student"

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("SparkReadGeomesa")
    // The Kryo serializer configuration is critical here; without it Spark cannot deserialize the feature data
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    conf.set("spark.kryo.registrator", classOf[GeoMesaSparkKryoRegistrator].getName)
    // withJTS registers the JTS geometry types and spatial UDFs with the session
    val ss = SparkSession.builder().config(conf).getOrCreate().withJTS

    // Method one fails with:
    // java.lang.ClassNotFoundException: org.locationtech.geomesa.hbase.rpc.filter.CqlTransformFilter
    // Most likely an environment issue: this filter runs server-side, so the GeoMesa HBase
    // distributed-runtime jar probably needs to be deployed on the HBase region servers
//    val dataFrame = readGeomesaData(ss)
//    showDataFrame(dataFrame, ss)

    // Method two: works
    val spatialRDD = readGeomesaDataToRDD(ss)
    showSpatialRDD(spatialRDD)

    ss.stop()
  }

  /**
   * Method one: read the data as a DataFrame
   * @param ss
   * @return
   */
  def readGeomesaData(ss: SparkSession): DataFrame = {
    val params = Map(
      "hbase.zookeepers" -> "10.82.xxx.xx:2181",
      "hbase.catalog"  -> GeomesaCatalog)

    val dataFrame = ss.read
      .format("geomesa")
      .options(params)
      .option("geomesa.feature", GeomesaCatalogFeature)
      .load()
    dataFrame
  }

  def showDataFrame(dataFrame: DataFrame, ss: SparkSession): Unit = {
    dataFrame.show()
    println("-----------------------------------")
    dataFrame.createOrReplaceTempView("student")
    val sqlQuery = "select * from student"
    val resultDataFrame = ss.sql(sqlQuery)
    resultDataFrame.show()
  }
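
  // Because the session was created withJTS, JTS spatial UDFs (st_point, st_contains,
  // st_makeBBOX, ...) are available in Spark SQL. A minimal sketch of a spatial predicate
  // over the temp view registered above; the bounding-box values are made up for illustration:
  def showStudentsInBBox(ss: SparkSession): Unit = {
    val sqlQuery = "select name, geom from student " +
      "where st_contains(st_makeBBOX(10.0, 10.0, 15.0, 16.0), geom)"
    ss.sql(sqlQuery).show()
  }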

  /**
   * Method two: read the data as a SpatialRDD
   * @param ss
   * @return
   */
  def readGeomesaDataToRDD(ss: SparkSession): SpatialRDD = {
    val params = Map(
      "hbase.zookeepers" -> "10.82.xxx.xx:2181",
      "hbase.catalog"  -> GeomesaCatalog)
    val spatialRDDProvider = GeoMesaSpark(params)
    val query = new Query(GeomesaCatalogFeature)
    val resultRDD = spatialRDDProvider.rdd(new Configuration, ss.sparkContext, params, query)
    resultRDD
  }

  def showSpatialRDD(spatialRDD: SpatialRDD): Unit = {
    spatialRDD.collect().foreach(row => {
      val geom = row.getAttribute("geom").toString
      val name = row.getAttribute("name").toString
      println("name:" + name + "  geom: " + geom)
    })
    println("-----------------------------------")
    spatialRDD.collect().foreach(println)
  }

}
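
Method two above fetches every feature. When only a subset is needed, the GeoTools Query can carry an ECQL filter, so the predicate is pushed down to GeoMesa rather than evaluated in Spark. A minimal sketch of such a variant (requires one extra import, org.geotools.filter.text.ecql.ECQL; the bounding-box coordinates are made up for illustration):

  def readGeomesaDataToRDDFiltered(ss: SparkSession): SpatialRDD = {
    val params = Map(
      "hbase.zookeepers" -> "10.82.xxx.xx:2181",
      "hbase.catalog"  -> GeomesaCatalog)
    // only features whose geom lies inside this (illustrative) bounding box
    val filter = ECQL.toFilter("bbox(geom, 10.0, 10.0, 15.0, 16.0)")
    val query = new Query(GeomesaCatalogFeature, filter)
    GeoMesaSpark(params).rdd(new Configuration, ss.sparkContext, params, query)
  }
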
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>spark-practise</artifactId>
        <groupId>org.example</groupId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>geomesa</artifactId>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
        <geomesa.version>3.3.0</geomesa.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>

        <dependency>
            <groupId>org.locationtech.geomesa</groupId>
            <artifactId>geomesa-hbase-spark-runtime-hbase2_2.12</artifactId>
            <version>${geomesa.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>org.apache.hadoop</groupId>
                    <artifactId>hadoop-hdfs</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

        <dependency>
            <groupId>org.locationtech.geomesa</groupId>
            <artifactId>geomesa-spark-core_2.12</artifactId>
            <version>${geomesa.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-yarn_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
            <scope>provided</scope>
        </dependency>
    </dependencies>

    <build>
        <resources>
            <resource>
                <directory>src/main/resources</directory>
                <filtering>true</filtering>
            </resource>
        </resources>

        <plugins>
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.2.1</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <scalaVersion>${scala.version}</scalaVersion>
                </configuration>
                <executions>
                    <execution>
                        <id>scala-compile-first</id>
                        <phase>process-resources</phase>
                        <goals>
                            <goal>add-source</goal>
                            <goal>compile</goal>
                        </goals>
                    </execution>
                    <execution>
                        <id>scala-test-compile</id>
                        <phase>process-test-resources</phase>
                        <goals>
                            <goal>testCompile</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>

            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>3.2.1</version>
                <configuration>
                    <artifactSet>
                        <excludes>
                            <exclude>org.slf4j:*</exclude>
                        </excludes>
                    </artifactSet>
                </configuration>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <createDependencyReducedPom>false</createDependencyReducedPom>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                            <transformers>
                                <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>

Run: spark-submit --master yarn --driver-memory=2G --class com.grady.geomesa.SparkReadGeomesa /app/data/appdeploy/geomesa-1.0-SNAPSHOT.jar

It's best to run this on YARN: a local run can stall because of insufficient memory, which bit me several times.
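
If you do run it locally, allocate more driver memory up front, for example (a sketch; adjust the master, memory size, and jar path to your environment):

spark-submit --master local[2] --driver-memory 4G --class com.grady.geomesa.SparkReadGeomesa /app/data/appdeploy/geomesa-1.0-SNAPSHOT.jar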

Execution log (the insert job was run a few times, hence the duplicate rows; each ScalaSimpleFeature line prints the feature ID followed by its attributes):
name:jack  geom: POINT (11.1 12.1)
name:Lily  geom: POINT (12.1 13.1)
name:jack  geom: POINT (11.1 12.1)
name:Lily  geom: POINT (12.1 13.1)
name:mike  geom: POINT (14.1 15.1)
name:jack  geom: POINT (11.1 12.1)
name:Lily  geom: POINT (12.1 13.1)
name:mike  geom: POINT (14.1 15.1)
name:mike  geom: POINT (14.1 15.1)
-----------------------------------
ScalaSimpleFeature:000017ed-e5d1-41f8-ae71-84db58b9478f:POINT (11.1 12.1)|1|jack|15
ScalaSimpleFeature:000017ed-e5d1-41f8-a308-efcee8b70bf9:POINT (12.1 13.1)|2|Lily|16
ScalaSimpleFeature:000017ed-e35c-4d77-a841-b3bcf6faa8ac:POINT (11.1 12.1)|1|jack|15
ScalaSimpleFeature:000017ed-e37a-4e60-9d7f-66988be48234:POINT (12.1 13.1)|2|Lily|16
ScalaSimpleFeature:000017ed-e35c-4e9a-8600-97ed8d92c48b:POINT (14.1 15.1)|3|mike|16
ScalaSimpleFeature:000017ed-e37a-4e60-b90f-93fc81e0ab0e:POINT (11.1 12.1)|1|jack|15
ScalaSimpleFeature:000017ed-e35c-4d77-99e7-c6918a06c008:POINT (12.1 13.1)|2|Lily|16
ScalaSimpleFeature:000017ed-e37a-4ebd-b3a5-a9c7399a635b:POINT (14.1 15.1)|3|mike|16
ScalaSimpleFeature:000017ed-e5d1-4257-a75d-b0e23729542e:POINT (14.1 15.1)|3|mike|16
