Tags: spring, SequoiaDB learning notes, Java, database
su sdbadmin
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
CREATE USER 'metauser'@'%' IDENTIFIED BY 'metauser';
GRANT ALL ON *.* TO 'metauser'@'%';
CREATE DATABASE metastore CHARACTER SET 'latin1' COLLATE 'latin1_bin';
FLUSH PRIVILEGES;
quit;
cat > /opt/apache-hive-1.2.2-bin/conf/hive-site.xml << EOF
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost/metastore?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>metauser</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>metauser</value>
  </property>
  <property>
    <name>hive.test.authz.sstd.hs2.mode</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.server2.enable.doAs</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.users.in.admin.role</name>
    <value>root</value>
  </property>
  <property>
    <name>hive.server2.thrift.port</name>
    <value>9073</value>
  </property>
  <property>
    <name>hive.server2.authentication</name>
    <value>CUSTOM</value>
  </property>
  <property>
    <name>hive.server2.custom.authentication.class</name>
    <value>com.sequoiadb.spark.sql.hive.SequoiadbAuth</value>
  </property>
  <property>
    <name>hive.security.authorization.manager</name>
    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
  </property>
</configuration>
EOF
cp spark-authorizer-2.1.1.jar /opt/apache-hive-1.2.2-bin/auxlib
cp mysql-connector-java-5.1.7-bin.jar /opt/apache-hive-1.2.2-bin/auxlib
export HADOOP_HOME=/opt/hadoop-2.9.2
/opt/apache-hive-1.2.2-bin/bin/schematool -dbType mysql -initSchema
su sdbadmin
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
use metastore;
create table DBUSER (dbuser varchar(100), passwd char(50), primary key (dbuser));
insert into DBUSER(dbuser, passwd) values ('root', md5('admin'));
This pre-creates a root user with password 'admin' for the thrift server. To add more users later, run a similar insert statement.
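For example, to add a hypothetical user sparkuser with password sparkpass (both values are placeholders, not part of the original setup):
insert into DBUSER(dbuser, passwd) values ('sparkuser', md5('sparkpass'));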
The trigger below makes every database subsequently registered in the metastore owned by the public role:
delimiter ||
create trigger dbs_trigger
before insert on DBS
for each row
begin
set new.OWNER_NAME="public";
set new.OWNER_TYPE="ROLE";
end ||
delimiter ;
cp spark-authorizer-2.1.1.jar /opt/spark/jars
cp mysql-connector-java-5.1.7-bin.jar /opt/spark/jars
cat > /opt/spark/conf/hive-site.xml << EOF
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost/metastore?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>metauser</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>metauser</value>
  </property>
  <property>
    <name>hive.security.authorization.createtable.owner.grants</name>
    <value>INSERT,SELECT</value>
  </property>
  <property>
    <name>hive.security.authorization.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.security.authorization.manager</name>
    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
  </property>
  <property>
    <name>hive.test.authz.sstd.hs2.mode</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.server2.authentication</name>
    <value>CUSTOM</value>
  </property>
  <property>
    <name>hive.server2.custom.authentication.class</name>
    <value>com.sequoiadb.spark.sql.hive.SequoiadbAuth</value>
  </property>
</configuration>
EOF
Enable the authorizer extension in Spark's configuration (typically conf/spark-defaults.conf):
spark.sql.extensions=org.apache.ranger.authorization.spark.authorizer.SequoiadbSparkSQLExtension
/opt/spark/sbin/start-all.sh
/opt/spark/sbin/start-thriftserver.sh
netstat -anp | grep 10000
/opt/spark/bin/beeline -u jdbc:hive2://localhost:10000 -n root -p admin
When a table is created in Spark SQL, the user who executes the CREATE TABLE statement gets INSERT and SELECT privileges on that table. For any other user to access it, run grant statements in Hive's thrift server to give that user the corresponding privileges:
${HIVE_HOME}/bin/hiveserver2 >${HIVE_HOME}/hive_thriftserver.log 2>&1 &
${HIVE_HOME}/bin/beeline -u jdbc:hive2://localhost:9073 -n root -p admin
set role admin;
grant SELECT on table test to user USERNAME;
grant INSERT on table test to user USERNAME;
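To verify what a user has been granted, Hive's SQL-standard authorization also supports a show grant statement (USERNAME is a placeholder, as above):
show grant user USERNAME on table test;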
var db = new Sdb("localhost", 11810);
db.createDomain("scottdomain", ["datagroup1", "datagroup2", "datagroup3"], {AutoSplit: true});
db.createCS("scott", {Domain: "scottdomain"});
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
create database scott;
use scott;
create table emp(
empno int unsigned auto_increment primary key COMMENT 'employee number',
ename varchar(15) COMMENT 'employee name',
job varchar(10) COMMENT 'job title',
mgr int unsigned COMMENT 'employee number of this employee''s manager',
hiredate date COMMENT 'hire date',
sal decimal(7,2) COMMENT 'base salary',
comm decimal(7,2) COMMENT 'commission',
deptno int unsigned COMMENT 'department number'
) ENGINE = sequoiadb COMMENT = "Employee table, sequoiadb: { table_options: { ShardingKey: { 'empno': 1 }, ShardingType: 'hash', 'Compressed': true, 'CompressionType': 'lzw', 'AutoSplit': true, 'EnsureShardingIndex': false } }";
INSERT INTO emp VALUES (7369,'SMITH','CLERK',7902,'1980-12-17',800,NULL,20);
INSERT INTO emp VALUES (7499,'ALLEN','SALESMAN',7698,'1981-2-20',1600,300,30);
INSERT INTO emp VALUES (7521,'WARD','SALESMAN',7698,'1981-2-22',1250,500,30);
INSERT INTO emp VALUES (7566,'JONES','MANAGER',7839,'1981-4-2',2975,NULL,20);
INSERT INTO emp VALUES (7654,'MARTIN','SALESMAN',7698,'1981-9-28',1250,1400,30);
INSERT INTO emp VALUES (7698,'BLAKE','MANAGER',7839,'1981-5-1',2850,NULL,30);
INSERT INTO emp VALUES (7782,'CLARK','MANAGER',7839,'1981-6-9',2450,NULL,10);
INSERT INTO emp VALUES (7788,'SCOTT','ANALYST',7566,'1987-7-13',3000,NULL,20);
INSERT INTO emp VALUES (7839,'KING','PRESIDENT',NULL,'1981-11-17',5000,NULL,10);
INSERT INTO emp VALUES (7844,'TURNER','SALESMAN',7698,'1981-9-8',1500,100,30);
INSERT INTO emp VALUES (7876,'ADAMS','CLERK',7788,'1987-7-13',1100,NULL,20);
INSERT INTO emp VALUES (7900,'JAMES','CLERK',7698,'1981-12-3',950,NULL,30);
INSERT INTO emp VALUES (7902,'FORD','ANALYST',7566,'1981-12-3',3000,NULL,20);
INSERT INTO emp VALUES (7934,'MILLER','CLERK',7782,'1982-1-23',1300,NULL,10);
<!-- fastjson and Druid connection pool dependencies -->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.58</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid-spring-boot-starter</artifactId>
<version>1.1.18</version>
</dependency>
<!-- MyBatis-Plus dependencies -->
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-core</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-extension</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- dependencies for connecting to Spark over JDBC -->
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>1.1.3</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-exec</artifactId>
<version>1.2.1</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-metastore</artifactId>
<version>1.2.1</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.2</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>4.4.4</version>
</dependency>
<dependency>
<groupId>org.apache.thrift</groupId>
<artifactId>libthrift</artifactId>
<version>0.9.2</version>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.17</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-api -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.10</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-log4j12 -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>1.7.10</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-hive-thriftserver -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-hive-thriftserver_2.11</artifactId>
<version>2.0.1</version>
<scope>provided</scope>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-network-common -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-network-common_2.11</artifactId>
<version>2.0.1</version>
</dependency>
<!-- SequoiaDB Java driver -->
<dependency>
<groupId>com.sequoiadb</groupId>
<artifactId>sequoiadb-driver</artifactId>
<version>3.2.1</version>
</dependency>
<!-- SequoiaDB Spark connector -->
<dependency>
<groupId>com.sequoiadb</groupId>
<artifactId>spark-sequoiadb_2.11</artifactId>
<version>2.8.0</version>
</dependency>
<dependency>
<groupId>com.sequoiadb</groupId>
<artifactId>spark-sequoiadb-scala_2.11.2</artifactId>
<version>1.12</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-sql -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
<version>2.2.2</version>
</dependency>
<!-- Hive JDBC client -->
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>1.2.1</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>3.2.0</version>
</dependency>
server.port=8090
#datasource config
# connection pool type
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
# JDBC driver
spring.datasource.driver-class-name=org.apache.hive.jdbc.HiveDriver
# connection URL, username and password
spring.datasource.url=jdbc:hive2://192.168.80.132:10000/default
spring.datasource.username=root
spring.datasource.password=admin
# number of connections created at startup
spring.datasource.druid.initialSize=1
# minimum number of idle connections
spring.datasource.druid.minIdle=5
# maximum number of active connections
spring.datasource.druid.maxActive=20
# maximum wait time (ms) when acquiring a connection
spring.datasource.druid.maxWait=60000
# interval (ms) between runs of the idle-connection evictor
spring.datasource.druid.timeBetweenEvictionRunsMillis=60000
# minimum time (ms) a connection must stay idle before it can be evicted
spring.datasource.druid.minEvictableIdleTimeMillis=300000
# query used to validate connections; must be a SELECT statement
spring.datasource.druid.validation-query=SELECT 1
#mybatis
#mybatis-plus.mapper-locations=classpath:mapper/*.xml
#mybatis-plus.configuration.cache-enabled=false
# location of the mapper XML files
mybatis.mapper-locations=classpath:mapper/*.xml
# package to scan for entity classes
mybatis.type-aliases-package=com.sdb.spark.demo.entity
# Spring MVC static resource pattern
spring.mvc.static-path-pattern=/static/**
# hot reload via devtools
spring.devtools.restart.enabled=true
spring.devtools.restart.additional-paths=src/main/java
spring.devtools.restart.exclude=WEB-INF/**
/**
 * Employee entity.
 *
 * @author yousongxian
 * @date 2020-07-29
 */
public class Emp {
    private Integer empno; // employee number
    private String ename; // employee name
    private String job; // job title
    private Integer mgr; // employee number of this employee's manager
    private String hiredate; // hire date
    private Double sal; // base salary
    private Double comm; // commission
    private Integer deptno; // department number
    // getters and setters omitted
    @Override
    public String toString() {
        return "Emp{" +
                "empno=" + empno +
                ", ename='" + ename + '\'' +
                ", job='" + job + '\'' +
                ", mgr=" + mgr +
                ", hiredate='" + hiredate + '\'' +
                ", sal=" + sal +
                ", comm=" + comm +
                ", deptno=" + deptno +
                '}';
    }
}
<!-- Generic query result mapping -->
<resultMap id="BaseResultMap" type="com.sdb.spark.demo.entity.Emp">
    <id column="empno" property="empno"/>
    <result column="ename" property="ename"/>
    <result column="job" property="job"/>
    <result column="mgr" property="mgr"/>
    <result column="hiredate" property="hiredate"/>
    <result column="sal" property="sal"/>
    <result column="comm" property="comm"/>
    <result column="deptno" property="deptno"/>
</resultMap>
<!-- Generic query: select all rows from the given table -->
<select id="selectAll" resultType="map" parameterType="string">
    select * from ${tablename}
</select>
<update id="createTableEmp">
CREATE TABLE emp
(
empno INT,
ename STRING,
job STRING,
mgr INT,
hiredate date,
sal decimal(7,2),
comm decimal(7,2),
deptno INT
)
USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
collectionspace 'scott',
collection 'emp'
)</update>
<update id="createTableEmpSchema">
CREATE TABLE emp_schema USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
collectionspace 'scott',
collection 'emp'
)
</update>
<update id="createTableAsSelect" parameterType="map">
CREATE TABLE ${tablename} USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
domain 'scottdomain',
collectionspace 'scott',
collection #{tablename},
shardingkey '{"_id":1}',
shardingtype 'hash',
autosplit true
) AS ${condition}
</update>
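The mapper interface below also declares an insertEmp method, but this section never shows the corresponding XML statement. A minimal sketch consistent with the Emp entity (assumed, not from the original) might look like:
<!-- hypothetical insertEmp statement; mirrors the Emp fields in table-column order -->
<insert id="insertEmp" parameterType="com.sdb.spark.demo.entity.Emp">
    INSERT INTO emp VALUES (#{empno}, #{ename}, #{job}, #{mgr}, #{hiredate}, #{sal}, #{comm}, #{deptno})
</insert>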
The statements above belong to the mapper XML file, whose namespace must point at the mapper interface:
<mapper namespace="com.sdb.spark.demo.mapper.EmpMapper">
Note: each interface method name must match the id of the corresponding statement in the XML mapping file.
List<Map<String,Object>> selectAll(@Param("tablename") String tablename); // query all rows of a given table; @Param makes ${tablename} resolvable in the XML
int createTableEmp(); // create the emp table
int createTableEmpSchema(); // create emp_schema, letting the schema be inferred automatically
int createTableAsSelect(Map<String,String> map); // create emp_as_select from a query result
int insertEmp(Emp emp); // insert one record into emp
@Service
public class EmpServiceImpl implements EmpService {

    @Autowired
    private EmpMapper empMapper;

    @Override
    public List<Map<String, Object>> selectAll(String tablename) {
        return empMapper.selectAll(tablename);
    }

    @Override
    public int createTableEmp() {
        return empMapper.createTableEmp();
    }

    @Override
    public int createTableEmpSchema() {
        return empMapper.createTableEmpSchema();
    }

    @Override
    public int createTableAsSelect(Map<String, String> map) {
        return empMapper.createTableAsSelect(map);
    }

    @Override
    public int insertEmp(Emp emp) {
        return empMapper.insertEmp(emp);
    }
}
@Autowired
private EmpService empService;

// @Test methods must return void, so the result is printed rather than returned
@Test
public void selectAll() {
    String tablename = "emp";
    List<Map<String, Object>> resultlist = empService.selectAll(tablename);
    for (Map<String, Object> map : resultlist) {
        for (Map.Entry<String, Object> m : map.entrySet()) {
            System.out.print(m.getKey() + "=" + m.getValue() + "\t");
        }
        System.out.println();
    }
}
@Test
public void createTable() {
    empService.createTableEmp();
}

@Test
public void createTableEmpSchema() {
    empService.createTableEmpSchema();
}

@Test
public void createTableAsSelect() {
    Map<String, String> map = new HashMap<String, String>();
    String tablename = "emp_as_select";
    // example query; in the original this line was commented out, leaving 'condition' undefined
    String condition = "select empno,ename from emp";
    map.put("tablename", tablename);
    map.put("condition", condition);
    // check for null before calling equals() to avoid a NullPointerException
    if (map.get("tablename") == null || map.get("tablename").equals("")
            || map.get("condition") == null || map.get("condition").equals("")) {
        System.out.println("please provide a valid table name and condition");
    } else {
        empService.createTableAsSelect(map);
    }
}
@SpringBootApplication(scanBasePackages = {"com.sdb.spark.demo.service.Impl"})
@MapperScan(basePackages = {"com.sdb.spark.demo.mapper"})
public class DemoApplication {

    public static void main(String[] args) {
        SpringApplication.run(DemoApplication.class, args);
    }
}