Hive 单节点核心配置:hive-env.sh 与 hive-site.xml
【代码】Hive 环境脚本 hive-env.sh 与核心配置 hive-site.xml。
1. hive-env.sh
[wangguowei@centos01 conf]$ cat hive-env.sh
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set Hive and Hadoop environment variables here. These variables can be used
# to control the execution of Hive. It should be used by admins to configure
# the Hive installation (so that users do not have to set environment variables
# or set command line parameters to get correct behavior).
#
# The hive service being invoked (CLI etc.) is available via the environment
# variable SERVICE
# Hive Client memory usage can be an issue if a large number of clients
# are running at the same time. The flags below have been useful in
# reducing memory usage:
#
# if [ "$SERVICE" = "cli" ]; then
# if [ -z "$DEBUG" ]; then
# export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
# else
# export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
# fi
# fi
# The heap size of the jvm stared by hive shell script can be controlled via:
#
# export HADOOP_HEAPSIZE=1024
#
# Larger heap size may be required when running queries over large number of files or partitions.
# By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be
# appropriate for hive server.
# Set HADOOP_HOME to point to a specific hadoop install directory
# HADOOP_HOME=${bin}/../../hadoop
# Local customization: point Hive at the Hadoop 2.8.2 installation it should
# submit jobs through. This is the only non-template line in this file.
export HADOOP_HOME=/opt/modules/hadoop-2.8.2
# Hive Configuration Directory can be controlled by:
# export HIVE_CONF_DIR=
# Folder containing extra libraries required for hive compilation/execution can be controlled by:
# export HIVE_AUX_JARS_PATH=
2. /opt/modules/apache-hive-2.3.3-bin/conf/hive-site.xml
<configuration>
  <!-- Required settings: the four metastore database connection properties
       (URL, driver, username, password). Without these Hive cannot reach
       its MySQL-backed metastore. -->
  <!-- Metastore JDBC connection URL.
       FIX: "&" must be escaped as "&amp;" inside an XML value; the raw
       "&" characters made this file malformed and unparseable. -->
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://192.168.170.133:3306/hive_db?serverTimezone=UTC&amp;useSSL=false&amp;allowPublicKeyRetrieval=true</value>
    <description>
      JDBC connect string for a JDBC metastore.
    </description>
  </property>
  <!-- Metastore JDBC driver class.
       NOTE(review): com.mysql.jdbc.Driver is the Connector/J 5.x class name.
       The URL parameters above (serverTimezone, allowPublicKeyRetrieval) are
       Connector/J 8.x options, whose preferred class is
       com.mysql.cj.jdbc.Driver (the old name still works, but is deprecated).
       Confirm which connector jar is actually on the classpath. -->
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>Driver class name for a JDBC metastore</description>
  </property>
  <!-- Metastore database username. -->
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
    <description>Username to use against metastore database</description>
  </property>
  <!-- Metastore database password.
       NOTE(review): a plaintext password in hive-site.xml is readable by
       anyone with access to this file; consider a credential provider. -->
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>wgw63414</value>
    <description>password to use against metastore database</description>
  </property>
  <!-- Optional settings below: storage/log directories and usability tweaks. -->
  <!-- Default warehouse location; this path is on HDFS. -->
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/user/hive/warehouse</value>
    <description>location of default database for the warehouse</description>
  </property>
  <!-- Scratch space on the LOCAL filesystem. -->
  <property>
    <name>hive.exec.local.scratchdir</name>
    <value>/tmp/hive</value>
    <description>Local scratch space for Hive jobs</description>
  </property>
  <!-- Scratch space on HDFS (same path string as the local one, but a
       different filesystem).
       FIX: the raw "<username>" in the description was parsed as an unclosed
       XML tag; it must be written with entities. -->
  <property>
    <name>hive.exec.scratchdir</name>
    <value>/tmp/hive</value>
    <description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
  </property>
  <!-- Local directory for downloaded (ADD FILE/JAR) resources. -->
  <property>
    <name>hive.downloaded.resources.dir</name>
    <value>/tmp/hive</value>
    <description>Temporary local directory for added resources in the remote file system.</description>
  </property>
  <!-- Local directory for structured query logs. -->
  <property>
    <name>hive.querylog.location</name>
    <value>/tmp/hive</value>
    <description>Location of Hive run time structured log file</description>
  </property>
  <!-- Local directory for HiveServer2 operation logs. -->
  <property>
    <name>hive.server2.logging.operation.log.location</name>
    <value>/tmp/hive</value>
    <description>Top level directory where operation logs are stored if logging functionality is enabled</description>
  </property>
  <!-- CLI usability: show the current database name and column headers. -->
  <property>
    <name>hive.cli.print.current.db</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.cli.print.header</name>
    <value>true</value>
  </property>
  <!-- Hive/HBase integration: the ZooKeeper quorum HBase registers with. -->
  <property>
    <name>hive.zookeeper.quorum</name>
    <value>centos01:2181,centos02:2181,centos03:2181</value>
  </property>
  <!-- Auxiliary jars for the HBase storage handler.
       NOTE(review): some Hive versions do not trim whitespace around the
       comma-separated entries; if jars fail to load, collapse this value
       onto a single line with no spaces or newlines between paths. -->
  <property>
    <name>hive.aux.jars.path</name>
    <value>
      file:///opt/modules/hbase-1.2.6.1/lib/hbase-common-1.2.6.1.jar,
      file:///opt/modules/hbase-1.2.6.1/lib/hbase-client-1.2.6.1.jar,
      file:///opt/modules/hbase-1.2.6.1/lib/hbase-server-1.2.6.1.jar,
      file:///opt/modules/hbase-1.2.6.1/lib/hbase-hadoop2-compat-1.2.6.1.jar,
      file:///opt/modules/hbase-1.2.6.1/lib/netty-all-4.0.23.Final.jar,
      file:///opt/modules/hbase-1.2.6.1/lib/hbase-protocol-1.2.6.1.jar,
      file:///opt/modules/zookeeper-3.4.10/zookeeper-3.4.10.jar
    </value>
  </property>
</configuration>

魔乐社区(Modelers.cn) 是一个中立、公益的人工智能社区,提供人工智能工具、模型、数据的托管、展示与应用协同服务,为人工智能开发及爱好者搭建开放的学习交流平台。社区通过理事会方式运作,由全产业链共同建设、共同运营、共同享有,推动国产AI生态繁荣发展。
更多推荐
所有评论(0)