While running set ngmr.exec.mode=cluster, I wanted to check what the original value of the ngmr.exec.mode parameter was, so I am writing this post to explain how to view parameters in Hive.
Example 1: set a specific parameter
-- returns the value of the mapreduce.map.memory.mb parameter
set mapreduce.map.memory.mb;
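The CLI echoes the parameter as key=value. A sample session is shown below; the value 1024 is only illustrative, since the actual number depends on your cluster's configuration:

hive> set mapreduce.map.memory.mb;
mapreduce.map.memory.mb=1024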
Example 2: set to list all variables overridden by the user or by Hive in the current session
set;
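Because the list runs to hundreds of entries, it is often easier to filter it from the shell. A minimal sketch, assuming the hive CLI is on your PATH (the grep pattern is just an example):

hive -e "set;" | grep mapreduce.map.memory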
Example 3: set -v to list all configuration variables, including Hadoop's
hive> set -v;
silent=off
fs.s3n.impl=org.apache.hadoop.fs.s3native.NativeS3FileSystem
datanucleus.validateColumns=false
mapred.task.cache.levels=2
hadoop.tmp.dir=/home/hexianghui/datahadoop
hadoop.native.lib=true
map.sort.class=org.apache.hadoop.util.QuickSort
ipc.client.idlethreshold=4000
mapred.system.dir=${hadoop.tmp.dir}/mapred/system
hive.script.serde=org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
mapred.job.tracker.persist.jobstatus.hours=0
io.skip.checksum.errors=false
fs.default.name=hdfs://192.168.0.4:9000
mapred.child.tmp=./tmp
datanucleus.cache.level2=false
mapred.skip.reduce.max.skip.groups=0
mapred.jobtracker.instrumentation=org.apache.hadoop.mapred.JobTrackerMetricsInst
mapred.tasktracker.dns.nameserver=default
io.sort.factor=10
hive.metastore.rawstore.impl=org.apache.hadoop.hive.metastore.ObjectStore
hive.metastore.local=true
mapred.task.timeout=600000
mapred.max.tracker.failures=4
hadoop.rpc.socket.factory.class.default=org.apache.hadoop.net.StandardSocketFactory
fs.hdfs.impl=org.apache.hadoop.hdfs.DistributedFileSystem
mapred.queue.default.acl-administer-jobs=*
mapred.queue.default.acl-submit-job=*
mapred.skip.map.auto.incr.proc.count=true
io.mapfile.bloom.size=1048576
tasktracker.http.threads=40
mapred.job.shuffle.merge.percent=0.66
fs.ftp.impl=org.apache.hadoop.fs.ftp.FTPFileSystem
io.bytes.per.checksum=512
mapred.output.compress=false
hive.test.mode.prefix=test_
hive.test.mode=false
hive.exec.compress.intermediate=false
topology.node.switch.mapping.impl=org.apache.hadoop.net.ScriptBasedMapping
datanucleus.cache.level2.type=SOFT
mapred.reduce.slowstart.completed.maps=0.05
mapred.reduce.max.attempts=4
fs.ramfs.impl=org.apache.hadoop.fs.InMemoryFileSystem
javax.jdo.option.ConnectionUserName=APP
mapred.skip.map.max.skip.records=0
hive.merge.mapfiles=true
hive.merge.size.smallfiles.avgsize=16000000
hive.test.mode.samplefreq=32
hive.optimize.skewjoin=false
mapred.job.tracker.persist.jobstatus.dir=/jobtracker/jobsInfo
fs.s3.buffer.dir=${hadoop.tmp.dir}/s3
hive.map.aggr.hash.min.reduction=0.5
job.end.retry.attempts=0
fs.file.impl=org.apache.hadoop.fs.LocalFileSystem
mapred.local.dir.minspacestart=0
hive.exec.compress.output=false
mapred.output.compression.type=RECORD
hive.script.recordreader=org.apache.hadoop.hive.ql.exec.TextRecordReader
topology.script.number.args=100
io.mapfile.bloom.error.rate=0.005
hive.exec.parallel.thread.number=8
mapred.max.tracker.blacklists=4
mapred.task.profile.maps=0-2
mapred.userlog.retain.hours=24
datanucleus.storeManagerType=rdbms
mapred.job.tracker.persist.jobstatus.active=false
hive.script.operator.id.env.var=HIVE_SCRIPT_OPERATOR_ID
hadoop.security.authorization=false
local.cache.size=10737418240
mapred.min.split.size=0
mapred.map.tasks=2
mapred.child.java.opts=-Xmx200m
hive.skewjoin.mapjoin.min.split=33554432
hive.metastore.warehouse.dir=/user/hive/warehouse
mapred.job.queue.name=default
hive.mapjoin.bucket.cache.size=100
datanucleus.transactionIsolation=read-committed
ipc.server.listen.queue.size=128
mapred.inmem.merge.threshold=1000
job.end.retry.interval=30000
mapred.skip.attempts.to.start.skipping=2
fs.checkpoint.dir=${hadoop.tmp.dir}/dfs/namesecondary
mapred.reduce.tasks=-1
mapred.merge.recordsBeforeProgress=10000
mapred.userlog.limit.kb=0
hive.skewjoin.key=100000
javax.jdo.option.ConnectionDriverName=org.apache.derby.jdbc.EmbeddedDriver
webinterface.private.actions=false
mapred.job.shuffle.input.buffer.percent=0.70
io.sort.spill.percent=0.80
hive.udtf.auto.progress=false
hive.session.id=hexianghui_201002232043
mapred.map.tasks.speculative.execution=true
hadoop.util.hash.type=murmur
hive.exec.script.maxerrsize=100000
hive.optimize.groupby=true
mapred.map.max.attempts=4
hive.default.fileformat=TextFile
hive.exec.scratchdir=/tmp/hive-${user.name}
mapred.job.tracker.handler.count=10
hive.script.recordwriter=org.apache.hadoop.hive.ql.exec.TextRecordWriter
hive.join.emit.interval=1000
datanucleus.validateConstraints=false
mapred.tasktracker.expiry.interval=600000
mapred.jobtracker.maxtasks.per.job=-1
mapred.jobtracker.job.history.block.size=3145728
keep.failed.task.files=false
ipc.client.tcpnodelay=false
mapred.task.profile.reduces=0-2
mapred.output.compression.codec=org.apache.hadoop.io.compress.DefaultCodec
io.map.index.skip=0
ipc.server.tcpnodelay=false
hive.join.cache.size=25000
datanucleus.autoStartMechanismMode=checked
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat
hadoop.logfile.size=10000000
mapred.reduce.tasks.speculative.execution=true
hive.skewjoin.mapjoin.map.tasks=10000
hive.hwi.listen.port=9999
fs.checkpoint.period=3600
mapred.job.reuse.jvm.num.tasks=1
mapred.jobtracker.completeuserjobs.maximum=100
hive.groupby.mapaggr.checkinterval=100000
fs.s3.maxRetries=4
javax.jdo.option.ConnectionURL=jdbc:derby:;databaseName=metastore_db;create=true
hive.mapred.mode=nonstrict
hive.groupby.skewindata=false
hive.exec.parallel=false
mapred.local.dir=${hadoop.tmp.dir}/mapred/local
fs.hftp.impl=org.apache.hadoop.hdfs.HftpFileSystem
fs.s3.sleepTimeSeconds=10
fs.trash.interval=0
mapred.submit.replication=10
hive.merge.size.per.task=256000000
fs.har.impl=org.apache.hadoop.fs.HarFileSystem
mapred.map.output.compression.codec=org.apache.hadoop.io.compress.DefaultCodec
hive.exec.reducers.max=999
mapred.tasktracker.dns.interface=default
mapred.job.tracker=192.168.0.4:9001
io.seqfile.sorter.recordlimit=1000000
hive.optimize.ppd=true
mapred.line.input.format.linespermap=1
mapred.jobtracker.taskScheduler=org.apache.hadoop.mapred.JobQueueTaskScheduler
mapred.tasktracker.instrumentation=org.apache.hadoop.mapred.TaskTrackerMetricsInst
hive.mapjoin.cache.numrows=25000
hive.merge.mapredfiles=false
hive.metastore.connect.retries=5
hive.fileformat.check=true
mapred.tasktracker.procfsbasedprocesstree.sleeptime-before-sigkill=5000
javax.jdo.option.DetachAllOnCommit=true
mapred.local.dir.minspacekill=0
hive.optimize.pruner=true
javax.jdo.option.ConnectionPassword=mine
hive.hwi.listen.host=0.0.0.0
io.sort.record.percent=0.05
hive.map.aggr.hash.percentmemory=0.5
fs.kfs.impl=org.apache.hadoop.fs.kfs.KosmosFileSystem
mapred.temp.dir=${hadoop.tmp.dir}/mapred/temp
mapred.tasktracker.reduce.tasks.maximum=2
javax.jdo.PersistenceManagerFactoryClass=org.datanucleus.jdo.JDOPersistenceManagerFactory
hive.mapred.local.mem=0
fs.checkpoint.edits.dir=${fs.checkpoint.dir}
mapred.job.reduce.input.buffer.percent=0.0
datanucleus.validateTables=false
mapred.tasktracker.indexcache.mb=10
hadoop.logfile.count=10
mapred.skip.reduce.auto.incr.proc.count=true
hive.script.auto.progress=false
io.seqfile.compress.blocksize=1000000
fs.s3.block.size=67108864
mapred.tasktracker.taskmemorymanager.monitoring-interval=5000
datanucleus.autoCreateSchema=true
mapred.acls.enabled=false
mapred.queue.names=default
fs.hsftp.impl=org.apache.hadoop.hdfs.HsftpFileSystem
hive.map.aggr=true
hive.enforce.bucketing=false
mapred.task.tracker.http.address=0.0.0.0:50060
mapred.reduce.parallel.copies=5
io.seqfile.lazydecompress=true
hive.exec.script.allow.partial.consumption=false
io.sort.mb=100
ipc.client.connection.maxidletime=10000
mapred.task.tracker.report.address=127.0.0.1:0
mapred.compress.map.output=false
hive.mapred.reduce.tasks.speculative.execution=true
ipc.client.kill.max=10
ipc.client.connect.max.retries=10
hive.heartbeat.interval=1000
fs.s3.impl=org.apache.hadoop.fs.s3.S3FileSystem
hive.mapjoin.maxsize=100000
mapred.job.tracker.http.address=0.0.0.0:50030
io.file.buffer.size=4096
mapred.jobtracker.restart.recover=false
io.serializations=org.apache.hadoop.io.serializer.WritableSerialization
hive.optimize.cp=true
javax.jdo.option.NonTransactionalRead=true
hive.exec.reducers.bytes.per.reducer=1000000000
mapred.reduce.copy.backoff=300
mapred.task.profile=false
jobclient.output.filter=FAILED
mapred.tasktracker.map.tasks.maximum=2
io.compression.codecs=org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec
fs.checkpoint.size=67108864
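Finally, a related tip: once parameters have been overridden in a session, the reset command (available in Hive 0.11 and later) restores them to their default values. A minimal sketch, reusing the parameter from the intro:

hive> set ngmr.exec.mode=cluster;
hive> reset;
-- session overrides are cleared; set ngmr.exec.mode; now shows the original value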