
HiveMetaStoreClient Usage Example

Below is the full source of Apache Hive's TestHiveMetaStore test class, which exercises essentially every HiveMetaStoreClient API: creating and dropping databases, tables, types, and partitions; altering tables and partitions; fetching configuration values; and listing partitions by name, by partial specification, and by filter expression.
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.metastore;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import junit.framework.TestCase;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.util.StringUtils;
import org.apache.thrift.TException;

public abstract class TestHiveMetaStore extends TestCase {
  protected static HiveMetaStoreClient client;
  protected static HiveConf hiveConf;
  protected static Warehouse warehouse;
  protected static boolean isThriftClient = false;

  private static final String TEST_DB1_NAME = "testdb1";
  private static final String TEST_DB2_NAME = "testdb2";

  @Override
  protected void setUp() throws Exception {
    hiveConf = new HiveConf(this.getClass());
    warehouse = new Warehouse(hiveConf);

    // set some values to use for getting conf. vars
    hiveConf.set("hive.metastore.metrics.enabled", "true");
    hiveConf.set("hive.key1", "value1");
    hiveConf.set("hive.key2", "http://www.example.com");
    hiveConf.set("hive.key3", "");
    hiveConf.set("hive.key4", "0");
  }

  public void testNameMethods() {
    Map<String, String> spec = new LinkedHashMap<String, String>();
    spec.put("ds", "2008-07-01 14:13:12");
    spec.put("hr", "14");
    List<String> vals = new ArrayList<String>();
    for (String v : spec.values()) {
      vals.add(v);
    }
    String partName = "ds=2008-07-01 14%3A13%3A12/hr=14";

    try {
      List<String> testVals = client.partitionNameToVals(partName);
      assertTrue("Values from name are incorrect", vals.equals(testVals));

      Map<String, String> testSpec = client.partitionNameToSpec(partName);
      assertTrue("Spec from name is incorrect", spec.equals(testSpec));

      List<String> emptyVals = client.partitionNameToVals("");
      assertTrue("Values should be empty", emptyVals.size() == 0);

      Map<String, String> emptySpec = client.partitionNameToSpec("");
      assertTrue("Spec should be empty", emptySpec.size() == 0);
    } catch (Exception e) {
      assert (false);
    }
  }

  /**
   * tests create table and partition and tries to drop the table without
   * dropping the partition
   *
   * @throws Exception
   */
  public void testPartition() throws Exception {
    partitionTester(client, hiveConf);
  }

  public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf)
      throws Exception {
    try {
      String dbName = "compdb";
      String tblName = "comptbl";
      String typeName = "Person";
      List<String> vals = new ArrayList<String>(2);
      vals.add("2008-07-01 14:13:12");
      vals.add("14");
      List<String> vals2 = new ArrayList<String>(2);
      vals2.add("2008-07-01 14:13:12");
      vals2.add("15");
      List<String> vals3 = new ArrayList<String>(2);
      vals3.add("2008-07-02 14:13:12");
      vals3.add("15");
      List<String> vals4 = new ArrayList<String>(2);
      vals4.add("2008-07-03 14:13:12");
      vals4.add("151");

      client.dropTable(dbName, tblName);
      silentDropDatabase(dbName);
      Database db = new Database();
      db.setName(dbName);
      client.createDatabase(db);

      client.dropType(typeName);
      Type typ1 = new Type();
      typ1.setName(typeName);
      typ1.setFields(new ArrayList<FieldSchema>(2));
      typ1.getFields().add(
          new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
      typ1.getFields().add(
          new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
      client.createType(typ1);

      Table tbl = new Table();
      tbl.setDbName(dbName);
      tbl.setTableName(tblName);
      StorageDescriptor sd = new StorageDescriptor();
      tbl.setSd(sd);
      sd.setCols(typ1.getFields());
      sd.setCompressed(false);
      sd.setNumBuckets(1);
      sd.setParameters(new HashMap<String, String>());
      sd.getParameters().put("test_param_1", "Use this for comments etc");
      sd.setBucketCols(new ArrayList<String>(2));
      sd.getBucketCols().add("name");
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tbl.getTableName());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().getParameters()
          .put(Constants.SERIALIZATION_FORMAT, "1");
      sd.setSortCols(new ArrayList<Order>());

      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
      tbl.getPartitionKeys().add(
          new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
      tbl.getPartitionKeys().add(
          new FieldSchema("hr", Constants.STRING_TYPE_NAME, ""));

      client.createTable(tbl);

      if (isThriftClient) {
        // the createTable() above does not update the location in the 'tbl'
        // object when the client is a thrift client and the code below relies
        // on the location being present in the 'tbl' object - so get the table
        // from the metastore
        tbl = client.getTable(dbName, tblName);
      }

      Partition part = new Partition();
      part.setDbName(dbName);
      part.setTableName(tblName);
      part.setValues(vals);
      part.setParameters(new HashMap<String, String>());
      part.setSd(tbl.getSd());
      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");

      Partition part2 = new Partition();
      part2.setDbName(dbName);
      part2.setTableName(tblName);
      part2.setValues(vals2);
      part2.setParameters(new HashMap<String, String>());
      part2.setSd(tbl.getSd());
      part2.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
      part2.getSd().setLocation(tbl.getSd().getLocation() + "/part2");

      Partition part3 = new Partition();
      part3.setDbName(dbName);
      part3.setTableName(tblName);
      part3.setValues(vals3);
      part3.setParameters(new HashMap<String, String>());
      part3.setSd(tbl.getSd());
      part3.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
      part3.getSd().setLocation(tbl.getSd().getLocation() + "/part3");

      Partition part4 = new Partition();
      part4.setDbName(dbName);
      part4.setTableName(tblName);
      part4.setValues(vals4);
      part4.setParameters(new HashMap<String, String>());
      part4.setSd(tbl.getSd());
      part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
      part4.getSd().setLocation(tbl.getSd().getLocation() + "/part4");

      // check if the partition exists (it shouldn't)
      boolean exceptionThrown = false;
      try {
        Partition p = client.getPartition(dbName, tblName, vals);
      } catch (Exception e) {
        assertEquals("partition should not have existed",
            NoSuchObjectException.class, e.getClass());
        exceptionThrown = true;
      }
      assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);

      Partition retp = client.add_partition(part);
      assertNotNull("Unable to create partition " + part, retp);
      Partition retp2 = client.add_partition(part2);
      assertNotNull("Unable to create partition " + part2, retp2);
      Partition retp3 = client.add_partition(part3);
      assertNotNull("Unable to create partition " + part3, retp3);
      Partition retp4 = client.add_partition(part4);
      assertNotNull("Unable to create partition " + part4, retp4);

      Partition part_get = client.getPartition(dbName, tblName, part.getValues());
      if (isThriftClient) {
        // since we are using thrift, 'part' will not have the create time and
        // last DDL time set since it does not get updated in the add_partition()
        // call - likewise part2 and part3 - set it correctly so that equals check
        // doesn't fail
        adjust(client, part, dbName, tblName);
        adjust(client, part2, dbName, tblName);
        adjust(client, part3, dbName, tblName);
      }
      assertTrue("Partitions are not same", part.equals(part_get));

      String partName = "ds=2008-07-01 14%3A13%3A12/hr=14";
      String part2Name = "ds=2008-07-01 14%3A13%3A12/hr=15";
      String part3Name = "ds=2008-07-02 14%3A13%3A12/hr=15";

      part_get = client.getPartition(dbName, tblName, partName);
      assertTrue("Partitions are not the same", part.equals(part_get));

      // Test partition listing with a partial spec - ds is specified but hr is not
      List<String> partialVals = new ArrayList<String>();
      partialVals.add(vals.get(0));
      Set<Partition> parts = new HashSet<Partition>();
      parts.add(part);
      parts.add(part2);

      List<Partition> partial = client.listPartitions(dbName, tblName, partialVals,
          (short) -1);
      assertTrue("Should have returned 2 partitions", partial.size() == 2);
      assertTrue("Not all parts returned", partial.containsAll(parts));

      Set<String> partNames = new HashSet<String>();
      partNames.add(partName);
      partNames.add(part2Name);
      List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals,
          (short) -1);
      assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
      assertTrue("Not all part names returned", partialNames.containsAll(partNames));

      // Test partition listing with a partial spec - hr is specified but ds is not
      parts.clear();
      parts.add(part2);
      parts.add(part3);

      partialVals.clear();
      partialVals.add("");
      partialVals.add(vals2.get(1));

      partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
      assertEquals("Should have returned 2 partitions", 2, partial.size());
      assertTrue("Not all parts returned", partial.containsAll(parts));

      partNames.clear();
      partNames.add(part2Name);
      partNames.add(part3Name);
      partialNames = client.listPartitionNames(dbName, tblName, partialVals,
          (short) -1);
      assertEquals("Should have returned 2 partition names", 2, partialNames.size());
      assertTrue("Not all part names returned", partialNames.containsAll(partNames));

      // Verify escaped partition names don't return partitions
      exceptionThrown = false;
      try {
        String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
        client.getPartition(dbName, tblName, badPartName);
      } catch (NoSuchObjectException e) {
        exceptionThrown = true;
      }
      assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);

      Path partPath = new Path(part2.getSd().getLocation());
      FileSystem fs = FileSystem.get(partPath.toUri(), hiveConf);

      assertTrue(fs.exists(partPath));
      client.dropPartition(dbName, tblName, part.getValues(), true);
      assertFalse(fs.exists(partPath));

      // Test append_partition_by_name
      client.appendPartition(dbName, tblName, partName);
      Partition part5 = client.getPartition(dbName, tblName, part.getValues());
      assertTrue("Append partition by name failed", part5.getValues().equals(vals));
      Path part5Path = new Path(part5.getSd().getLocation());
      assertTrue(fs.exists(part5Path));

      // Test drop_partition_by_name
      assertTrue("Drop partition by name failed",
          client.dropPartition(dbName, tblName, partName, true));
      assertFalse(fs.exists(part5Path));

      // add the partition again so that drop table with a partition can be
      // tested
      retp = client.add_partition(part);
      assertNotNull("Unable to create partition " + part, retp);

      client.dropTable(dbName, tblName);

      client.dropType(typeName);

      // recreate table as external, drop partition and it should
      // still exist
      tbl.setParameters(new HashMap<String, String>());
      tbl.getParameters().put("EXTERNAL", "TRUE");
      client.createTable(tbl);
      retp = client.add_partition(part);
      assertTrue(fs.exists(partPath));
      client.dropPartition(dbName, tblName, part.getValues(), true);
      assertTrue(fs.exists(partPath));

      for (String tableName : client.getTables(dbName, "*")) {
        client.dropTable(dbName, tableName);
      }

      client.dropDatabase(dbName);
    } catch (Exception e) {
      System.err.println(StringUtils.stringifyException(e));
      System.err.println("testPartition() failed.");
      throw e;
    }
  }

  public void testAlterPartition() throws Throwable {
    try {
      String dbName = "compdb";
      String tblName = "comptbl";
      List<String> vals = new ArrayList<String>(2);
      vals.add("2008-07-01");
      vals.add("14");

      client.dropTable(dbName, tblName);
      silentDropDatabase(dbName);

      Database db = new Database();
      db.setName(dbName);
      db.setDescription("Alter Partition Test database");
      client.createDatabase(db);

      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
      cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
      cols.add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));

      Table tbl = new Table();
      tbl.setDbName(dbName);
      tbl.setTableName(tblName);
      StorageDescriptor sd = new StorageDescriptor();
      tbl.setSd(sd);
      sd.setCols(cols);
      sd.setCompressed(false);
      sd.setNumBuckets(1);
      sd.setParameters(new HashMap<String, String>());
      sd.getParameters().put("test_param_1", "Use this for comments etc");
      sd.setBucketCols(new ArrayList<String>(2));
      sd.getBucketCols().add("name");
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tbl.getTableName());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().getParameters()
          .put(Constants.SERIALIZATION_FORMAT, "1");
      sd.setSortCols(new ArrayList<Order>());

      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
      tbl.getPartitionKeys().add(
          new FieldSchema("ds", Constants.STRING_TYPE_NAME, ""));
      tbl.getPartitionKeys().add(
          new FieldSchema("hr", Constants.INT_TYPE_NAME, ""));

      client.createTable(tbl);

      if (isThriftClient) {
        // the createTable() above does not update the location in the 'tbl'
        // object when the client is a thrift client and the code below relies
        // on the location being present in the 'tbl' object - so get the table
        // from the metastore
        tbl = client.getTable(dbName, tblName);
      }

      Partition part = new Partition();
      part.setDbName(dbName);
      part.setTableName(tblName);
      part.setValues(vals);
      part.setParameters(new HashMap<String, String>());
      part.setSd(tbl.getSd());
      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");

      client.add_partition(part);

      Partition part2 = client.getPartition(dbName, tblName, part.getValues());

      part2.getParameters().put("retention", "10");
      part2.getSd().setNumBuckets(12);
      part2.getSd().getSerdeInfo().getParameters().put("abc", "1");
      client.alter_partition(dbName, tblName, part2);

      Partition part3 = client.getPartition(dbName, tblName, part.getValues());
      assertEquals("couldn't alter partition", part3.getParameters().get(
          "retention"), "10");
      assertEquals("couldn't alter partition", part3.getSd().getSerdeInfo()
          .getParameters().get("abc"), "1");
      assertEquals("couldn't alter partition", part3.getSd().getNumBuckets(),
          12);

      client.dropTable(dbName, tblName);
      client.dropDatabase(dbName);
    } catch (Exception e) {
      System.err.println(StringUtils.stringifyException(e));
      System.err.println("testAlterPartition() failed.");
      throw e;
    }
  }

  public void testDatabase() throws Throwable {
    try {
      // clear up any existing databases
      silentDropDatabase(TEST_DB1_NAME);
      silentDropDatabase(TEST_DB2_NAME);

      Database db = new Database();
      db.setName(TEST_DB1_NAME);
      client.createDatabase(db);

      db = client.getDatabase(TEST_DB1_NAME);

      assertEquals("name of returned db is different from that of inserted db",
          TEST_DB1_NAME, db.getName());
      assertEquals("location of the returned db is different from that of inserted db",
          warehouse.getDefaultDatabasePath(TEST_DB1_NAME).toString(), db.getLocationUri());

      Database db2 = new Database();
      db2.setName(TEST_DB2_NAME);
      client.createDatabase(db2);

      db2 = client.getDatabase(TEST_DB2_NAME);

      assertEquals("name of returned db is different from that of inserted db",
          TEST_DB2_NAME, db2.getName());
      assertEquals("location of the returned db is different from that of inserted db",
          warehouse.getDefaultDatabasePath(TEST_DB2_NAME).toString(), db2.getLocationUri());

      List<String> dbs = client.getDatabases(".*");

      assertTrue("first database is not " + TEST_DB1_NAME, dbs.contains(TEST_DB1_NAME));
      assertTrue("second database is not " + TEST_DB2_NAME, dbs.contains(TEST_DB2_NAME));

      client.dropDatabase(TEST_DB1_NAME);
      client.dropDatabase(TEST_DB2_NAME);
      silentDropDatabase(TEST_DB1_NAME);
      silentDropDatabase(TEST_DB2_NAME);
    } catch (Throwable e) {
      System.err.println(StringUtils.stringifyException(e));
      System.err.println("testDatabase() failed.");
      throw e;
    }
  }

  public void testSimpleTypeApi() throws Exception {
    try {
      client.dropType(Constants.INT_TYPE_NAME);

      Type typ1 = new Type();
      typ1.setName(Constants.INT_TYPE_NAME);
      boolean ret = client.createType(typ1);
      assertTrue("Unable to create type", ret);

      Type typ1_2 = client.getType(Constants.INT_TYPE_NAME);
      assertNotNull(typ1_2);
      assertEquals(typ1.getName(), typ1_2.getName());

      ret = client.dropType(Constants.INT_TYPE_NAME);
      assertTrue("unable to drop type integer", ret);

      boolean exceptionThrown = false;
      try {
        client.getType(Constants.INT_TYPE_NAME);
      } catch (NoSuchObjectException e) {
        exceptionThrown = true;
      }
      assertTrue("Expected NoSuchObjectException", exceptionThrown);
    } catch (Exception e) {
      System.err.println(StringUtils.stringifyException(e));
      System.err.println("testSimpleTypeApi() failed.");
      throw e;
    }
  }

  // TODO:pc need to enhance this with complex fields and getType_all function
  public void testComplexTypeApi() throws Exception {
    try {
      client.dropType("Person");

      Type typ1 = new Type();
      typ1.setName("Person");
      typ1.setFields(new ArrayList<FieldSchema>(2));
      typ1.getFields().add(
          new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
      typ1.getFields().add(
          new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
      boolean ret = client.createType(typ1);
      assertTrue("Unable to create type", ret);

      Type typ1_2 = client.getType("Person");
      assertNotNull("type Person not found", typ1_2);
      assertEquals(typ1.getName(), typ1_2.getName());
      assertEquals(typ1.getFields().size(), typ1_2.getFields().size());
      assertEquals(typ1.getFields().get(0), typ1_2.getFields().get(0));
      assertEquals(typ1.getFields().get(1), typ1_2.getFields().get(1));

      client.dropType("Family");

      Type fam = new Type();
      fam.setName("Family");
      fam.setFields(new ArrayList<FieldSchema>(2));
      fam.getFields().add(
          new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
      fam.getFields().add(
          new FieldSchema("members",
              MetaStoreUtils.getListType(typ1.getName()), ""));

      ret = client.createType(fam);
      assertTrue("Unable to create type " + fam.getName(), ret);

      Type fam2 = client.getType("Family");
      assertNotNull("type Family not found", fam2);
      assertEquals(fam.getName(), fam2.getName());
      assertEquals(fam.getFields().size(), fam2.getFields().size());
      assertEquals(fam.getFields().get(0), fam2.getFields().get(0));
      assertEquals(fam.getFields().get(1), fam2.getFields().get(1));

      ret = client.dropType("Family");
      assertTrue("unable to drop type Family", ret);

      ret = client.dropType("Person");
      assertTrue("unable to drop type Person", ret);

      boolean exceptionThrown = false;
      try {
        client.getType("Person");
      } catch (NoSuchObjectException e) {
        exceptionThrown = true;
      }
      assertTrue("Expected NoSuchObjectException", exceptionThrown);
    } catch (Exception e) {
      System.err.println(StringUtils.stringifyException(e));
      System.err.println("testComplexTypeApi() failed.");
      throw e;
    }
  }

  public void testSimpleTable() throws Exception {
    try {
      String dbName = "simpdb";
      String tblName = "simptbl";
      String tblName2 = "simptbl2";
      String typeName = "Person";

      client.dropTable(dbName, tblName);
      silentDropDatabase(dbName);

      Database db = new Database();
      db.setName(dbName);
      client.createDatabase(db);

      client.dropType(typeName);
      Type typ1 = new Type();
      typ1.setName(typeName);
      typ1.setFields(new ArrayList<FieldSchema>(2));
      typ1.getFields().add(
          new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
      typ1.getFields().add(
          new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
      client.createType(typ1);

      Table tbl = new Table();
      tbl.setDbName(dbName);
      tbl.setTableName(tblName);
      StorageDescriptor sd = new StorageDescriptor();
      tbl.setSd(sd);
      sd.setCols(typ1.getFields());
      sd.setCompressed(false);
      sd.setNumBuckets(1);
      sd.setParameters(new HashMap<String, String>());
      sd.getParameters().put("test_param_1", "Use this for comments etc");
      sd.setBucketCols(new ArrayList<String>(2));
      sd.getBucketCols().add("name");
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tbl.getTableName());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().getParameters().put(
          org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
      sd.getSerdeInfo().setSerializationLib(
          org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
      tbl.setPartitionKeys(new ArrayList<FieldSchema>());

      client.createTable(tbl);

      if (isThriftClient) {
        // the createTable() above does not update the location in the 'tbl'
        // object when the client is a thrift client and the code below relies
        // on the location being present in the 'tbl' object - so get the table
        // from the metastore
        tbl = client.getTable(dbName, tblName);
      }

      Table tbl2 = client.getTable(dbName, tblName);
      assertNotNull(tbl2);
      assertEquals(tbl2.getDbName(), dbName);
      assertEquals(tbl2.getTableName(), tblName);
      assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
      assertEquals(tbl2.getSd().isCompressed(), false);
      assertEquals(tbl2.getSd().getNumBuckets(), 1);
      assertEquals(tbl2.getSd().getLocation(), tbl.getSd().getLocation());
      assertNotNull(tbl2.getSd().getSerdeInfo());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().getParameters().put(
          org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");

      tbl2.setTableName(tblName2);
      tbl2.setParameters(new HashMap<String, String>());
      tbl2.getParameters().put("EXTERNAL", "TRUE");
      tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2");

      List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
      assertNotNull(fieldSchemas);
      assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
      for (FieldSchema fs : tbl.getSd().getCols()) {
        assertTrue(fieldSchemas.contains(fs));
      }

      List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
      assertNotNull(fieldSchemasFull);
      assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
          + tbl.getPartitionKeys().size());
      for (FieldSchema fs : tbl.getSd().getCols()) {
        assertTrue(fieldSchemasFull.contains(fs));
      }
      for (FieldSchema fs : tbl.getPartitionKeys()) {
        assertTrue(fieldSchemasFull.contains(fs));
      }

      client.createTable(tbl2);
      if (isThriftClient) {
        tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName());
      }

      Table tbl3 = client.getTable(dbName, tblName2);
      assertNotNull(tbl3);
      assertEquals(tbl3.getDbName(), dbName);
      assertEquals(tbl3.getTableName(), tblName2);
      assertEquals(tbl3.getSd().getCols().size(), typ1.getFields().size());
      assertEquals(tbl3.getSd().isCompressed(), false);
      assertEquals(tbl3.getSd().getNumBuckets(), 1);
      assertEquals(tbl3.getSd().getLocation(), tbl2.getSd().getLocation());
      assertEquals(tbl3.getParameters(), tbl2.getParameters());

      fieldSchemas = client.getFields(dbName, tblName2);
      assertNotNull(fieldSchemas);
      assertEquals(fieldSchemas.size(), tbl2.getSd().getCols().size());
      for (FieldSchema fs : tbl2.getSd().getCols()) {
        assertTrue(fieldSchemas.contains(fs));
      }

      fieldSchemasFull = client.getSchema(dbName, tblName2);
      assertNotNull(fieldSchemasFull);
      assertEquals(fieldSchemasFull.size(), tbl2.getSd().getCols().size()
          + tbl2.getPartitionKeys().size());
      for (FieldSchema fs : tbl2.getSd().getCols()) {
        assertTrue(fieldSchemasFull.contains(fs));
      }
      for (FieldSchema fs : tbl2.getPartitionKeys()) {
        assertTrue(fieldSchemasFull.contains(fs));
      }

      assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
          .get("test_param_1"));
      assertEquals("name", tbl2.getSd().getBucketCols().get(0));
      assertTrue("Partition key list is not empty",
          (tbl2.getPartitionKeys() == null)
              || (tbl2.getPartitionKeys().size() == 0));

      FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf);
      client.dropTable(dbName, tblName);
      assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));

      client.dropTable(dbName, tblName2);
      assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));

      client.dropType(typeName);
      client.dropDatabase(dbName);
    } catch (Exception e) {
      System.err.println(StringUtils.stringifyException(e));
      System.err.println("testSimpleTable() failed.");
      throw e;
    }
  }

  public void testAlterTable() throws Exception {
    String dbName = "alterdb";
    String invTblName = "alter-tbl";
    String tblName = "altertbl";

    try {
      client.dropTable(dbName, tblName);
      silentDropDatabase(dbName);

      Database db = new Database();
      db.setName(dbName);
      client.createDatabase(db);

      ArrayList<FieldSchema> invCols = new ArrayList<FieldSchema>(2);
      invCols.add(new FieldSchema("n-ame", Constants.STRING_TYPE_NAME, ""));
      invCols.add(new FieldSchema("in.come", Constants.INT_TYPE_NAME, ""));

      Table tbl = new Table();
      tbl.setDbName(dbName);
      tbl.setTableName(invTblName);
      StorageDescriptor sd = new StorageDescriptor();
      tbl.setSd(sd);
      sd.setCols(invCols);
      sd.setCompressed(false);
      sd.setNumBuckets(1);
      sd.setParameters(new HashMap<String, String>());
      sd.getParameters().put("test_param_1", "Use this for comments etc");
      sd.setBucketCols(new ArrayList<String>(2));
      sd.getBucketCols().add("name");
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tbl.getTableName());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().getParameters().put(
          org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");

      boolean failed = false;
      try {
        client.createTable(tbl);
      } catch (InvalidObjectException ex) {
        failed = true;
      }
      if (!failed) {
        assertTrue("Able to create table with invalid name: " + invTblName,
            false);
      }

      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
      cols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
      cols.add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));

      // create a valid table
      tbl.setTableName(tblName);
      tbl.getSd().setCols(cols);
      client.createTable(tbl);

      if (isThriftClient) {
        tbl = client.getTable(tbl.getDbName(), tbl.getTableName());
      }

      // now try an invalid alter table
      Table tbl2 = client.getTable(dbName, tblName);
      failed = false;
      try {
        tbl2.setTableName(invTblName);
        tbl2.getSd().setCols(invCols);
        client.alter_table(dbName, tblName, tbl2);
      } catch (InvalidOperationException ex) {
        failed = true;
      }
      if (!failed) {
        assertTrue("Able to rename table with invalid name: " + invTblName,
            false);
      }

      // try a valid alter table
      tbl2.setTableName(tblName + "_renamed");
      tbl2.getSd().setCols(cols);
      tbl2.getSd().setNumBuckets(32);
      client.alter_table(dbName, tblName, tbl2);
      Table tbl3 = client.getTable(dbName, tbl2.getTableName());
      assertEquals("Alter table didn't succeed. Num buckets is different ",
          tbl2.getSd().getNumBuckets(), tbl3.getSd().getNumBuckets());

      // check that data has moved
      FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf);
      assertFalse("old table location still exists", fs.exists(new Path(tbl
          .getSd().getLocation())));
      assertTrue("data did not move to new location", fs.exists(new Path(tbl3
          .getSd().getLocation())));

      if (!isThriftClient) {
        assertEquals("alter table didn't move data to the correct location", tbl3
            .getSd().getLocation(), tbl2.getSd().getLocation());
      }
    } catch (Exception e) {
      System.err.println(StringUtils.stringifyException(e));
      System.err.println("testAlterTable() failed.");
      throw e;
    } finally {
      silentDropDatabase(dbName);
    }
  }

  public void testComplexTable() throws Exception {
    String dbName = "compdb";
    String tblName = "comptbl";
    String typeName = "Person";

    try {
      client.dropTable(dbName, tblName);
      silentDropDatabase(dbName);

      Database db = new Database();
      db.setName(dbName);
      client.createDatabase(db);

      client.dropType(typeName);
      Type typ1 = new Type();
      typ1.setName(typeName);
      typ1.setFields(new ArrayList<FieldSchema>(2));
      typ1.getFields().add(
          new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
      typ1.getFields().add(
          new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
      client.createType(typ1);

      Table tbl = new Table();
      tbl.setDbName(dbName);
      tbl.setTableName(tblName);
      StorageDescriptor sd = new StorageDescriptor();
      tbl.setSd(sd);
      sd.setCols(typ1.getFields());
      sd.setCompressed(false);
      sd.setNumBuckets(1);
      sd.setParameters(new HashMap<String, String>());
      sd.getParameters().put("test_param_1", "Use this for comments etc");
      sd.setBucketCols(new ArrayList<String>(2));
      sd.getBucketCols().add("name");
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tbl.getTableName());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().getParameters().put(
          org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "9");
      sd.getSerdeInfo().setSerializationLib(
          org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());

      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
      tbl.getPartitionKeys().add(
          new FieldSchema("ds",
              org.apache.hadoop.hive.serde.Constants.DATE_TYPE_NAME, ""));
      tbl.getPartitionKeys().add(
          new FieldSchema("hr",
              org.apache.hadoop.hive.serde.Constants.INT_TYPE_NAME, ""));

      client.createTable(tbl);

      Table tbl2 = client.getTable(dbName, tblName);
      assertEquals(tbl2.getDbName(), dbName);
      assertEquals(tbl2.getTableName(), tblName);
      assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
      assertFalse(tbl2.getSd().isCompressed());
      assertEquals(tbl2.getSd().getNumBuckets(), 1);

      assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
          .get("test_param_1"));
      assertEquals("name", tbl2.getSd().getBucketCols().get(0));

      assertNotNull(tbl2.getPartitionKeys());
      assertEquals(2, tbl2.getPartitionKeys().size());
      assertEquals(Constants.DATE_TYPE_NAME, tbl2.getPartitionKeys().get(0)
          .getType());
      assertEquals(Constants.INT_TYPE_NAME, tbl2.getPartitionKeys().get(1)
          .getType());
      assertEquals("ds", tbl2.getPartitionKeys().get(0).getName());
      assertEquals("hr", tbl2.getPartitionKeys().get(1).getName());

      List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
      assertNotNull(fieldSchemas);
      assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
      for (FieldSchema fs : tbl.getSd().getCols()) {
        assertTrue(fieldSchemas.contains(fs));
      }

      List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
      assertNotNull(fieldSchemasFull);
      assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
          + tbl.getPartitionKeys().size());
      for (FieldSchema fs : tbl.getSd().getCols()) {
        assertTrue(fieldSchemasFull.contains(fs));
      }
      for (FieldSchema fs : tbl.getPartitionKeys()) {
        assertTrue(fieldSchemasFull.contains(fs));
      }
    } catch (Exception e) {
      System.err.println(StringUtils.stringifyException(e));
      System.err.println("testComplexTable() failed.");
      throw e;
    } finally {
      client.dropTable(dbName, tblName);
      boolean ret = client.dropType(typeName);
      assertTrue("Unable to drop type " + typeName, ret);
      client.dropDatabase(dbName);
    }
  }

  public void testGetConfigValue() {
    String val = "value";

    if (!isThriftClient) {
      try {
        assertEquals(client.getConfigValue("hive.key1", val), "value1");
        assertEquals(client.getConfigValue("hive.key2", val), "http://www.example.com");
        assertEquals(client.getConfigValue("hive.key3", val), "");
        assertEquals(client.getConfigValue("hive.key4", val), "0");
        assertEquals(client.getConfigValue("hive.key5", val), val);
        assertEquals(client.getConfigValue(null, val), val);
      } catch (TException e) {
        e.printStackTrace();
        assert (false);
      } catch (ConfigValSecurityException e) {
        e.printStackTrace();
        assert (false);
      }
    }

    boolean threwException = false;
    try {
      // Attempting to get the password should throw an exception
      client.getConfigValue("javax.jdo.option.ConnectionPassword", "password");
    } catch (TException e) {
      e.printStackTrace();
      assert (false);
    } catch (ConfigValSecurityException e) {
      threwException = true;
    }
    assert (threwException);
  }

  private static void adjust(HiveMetaStoreClient client, Partition part,
      String dbName, String tblName)
      throws NoSuchObjectException, MetaException, TException {
    Partition part_get = client.getPartition(dbName, tblName, part.getValues());
    part.setCreateTime(part_get.getCreateTime());
    part.putToParameters(org.apache.hadoop.hive.metastore.api.Constants.DDL_TIME,
        Long.toString(part_get.getCreateTime()));
  }

  private static void silentDropDatabase(String dbName) throws MetaException, TException {
    try {
      for (String tableName : client.getTables(dbName, "*")) {
        client.dropTable(dbName, tableName);
      }
      client.dropDatabase(dbName);
    } catch (NoSuchObjectException e) {
    } catch (InvalidOperationException e) {
    }
  }

  /**
   * Tests for list partition by filter functionality.
   * @throws Exception
   */
  public void testPartitionFilter() throws Exception {
    String dbName = "filterdb";
    String tblName = "filtertbl";

    List<String> vals = new ArrayList<String>(3);
    vals.add("p11");
    vals.add("p21");
    vals.add("p31");
    List<String> vals2 = new ArrayList<String>(3);
    vals2.add("p11");
    vals2.add("p22");
    vals2.add("p31");
    List<String> vals3 = new ArrayList<String>(3);
    vals3.add("p12");
    vals3.add("p21");
    vals3.add("p31");
    List<String> vals4 = new ArrayList<String>(3);
    vals4.add("p12");
    vals4.add("p23");
    vals4.add("p31");
    List<String> vals5 = new ArrayList<String>(3);
    vals5.add("p13");
    vals5.add("p24");
    vals5.add("p31");
    List<String> vals6 = new ArrayList<String>(3);
    vals6.add("p13");
    vals6.add("p25");
    vals6.add("p31");

    silentDropDatabase(dbName);

    Database db = new Database();
    db.setName(dbName);
    client.createDatabase(db);

    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
    cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, ""));
    cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, ""));

    ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(3);
    partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
    partCols.add(new FieldSchema("p2", Constants.STRING_TYPE_NAME, ""));
    partCols.add(new FieldSchema("p3", Constants.INT_TYPE_NAME, ""));

    Table tbl = new Table();
    tbl.setDbName(dbName);
    tbl.setTableName(tblName);
    StorageDescriptor sd = new StorageDescriptor();
    tbl.setSd(sd);
    sd.setCols(cols);
    sd.setCompressed(false);
    sd.setNumBuckets(1);
    sd.setParameters(new HashMap<String, String>());
    sd.setBucketCols(new ArrayList<String>());
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters()
        .put(Constants.SERIALIZATION_FORMAT, "1");
    sd.setSortCols(new ArrayList<Order>());

    tbl.setPartitionKeys(partCols);
    client.createTable(tbl);

    tbl = client.getTable(dbName, tblName);

    add_partition(client, tbl, vals, "part1");
    add_partition(client, tbl, vals2, "part2");
    add_partition(client, tbl, vals3, "part3");
    add_partition(client, tbl, vals4, "part4");
    add_partition(client, tbl, vals5, "part5");
    add_partition(client, tbl, vals6, "part6");

    checkFilter(client, dbName, tblName, "p1 = \"p11\"", 2);
    checkFilter(client, dbName, tblName, "p1 = \"p12\"", 2);
    checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2);
    checkFilter(client, dbName, tblName, "p2 = \"p23\"", 1);
    checkFilter(client, dbName, tblName, "p1 = \"p11\" and p2=\"p22\"", 1);
    checkFilter(client, dbName, tblName, "p1 = \"p11\" or p2=\"p23\"", 3);
    checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
    checkFilter(client, dbName, tblName,
        "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\")", 3);
    checkFilter(client, dbName, tblName,
        "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\") Or " +
        "(p1=\"p13\" aNd p2=\"p24\")", 4);

    // test for and/or precedence
    checkFilter(client, dbName, tblName,
        "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
    checkFilter(client, dbName, tblName,
        "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);

    checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
    checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
    checkFilter(client, dbName, tblName, "p1 < \"p12\"", 2);
    checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 4);
    checkFilter(client, dbName, tblName, "p1 <> \"p12\"", 4);
    checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6);
    checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1);

    // Test for setting the maximum partition count
    List<Partition> partitions = client.listPartitionsByFilter(dbName,
        tblName, "p1 >= \"p12\"", (short) 2);
    assertEquals("User specified row limit for partitions",
        2, partitions.size());

    // Negative tests
    Exception me = null;
    try {
      client.listPartitionsByFilter(dbName,
          tblName, "p3 >= \"p12\"", (short) -1);
    } catch (MetaException e) {
      me = e;
    }
    assertNotNull(me);
    assertTrue("Filter on int partition key", me.getMessage().contains(
        "Filtering is supported only on partition keys of type string"));

    me = null;
    try {
      client.listPartitionsByFilter(dbName,
          tblName, "c1 >= \"p12\"", (short) -1);
    } catch (MetaException e) {
      me = e;
    }
    assertNotNull(me);
    assertTrue("Filter on invalid key", me.getMessage().contains(
        "<c1> is not a partitioning key for the table"));

    me = null;
    try {
      client.listPartitionsByFilter(dbName,
          tblName, "c1 >= ", (short) -1);
    } catch (MetaException e) {
      me = e;
    }
    assertNotNull(me);
    assertTrue("Invalid filter string", me.getMessage().contains(
        "Error parsing partition filter"));

    me = null;
    try {
      client.listPartitionsByFilter("invDBName",
          "invTableName", "p1 = \"p11\"", (short) -1);
    } catch (NoSuchObjectException e) {
      me = e;
    }
    assertNotNull(me);
    assertTrue("NoSuchObject exception", me.getMessage().contains(
        "database/table does not exist"));

    client.dropTable(dbName, tblName);
    client.dropDatabase(dbName);
  }

  /**
   * Test filtering on table with single partition
   * @throws Exception
   */
  public void testFilterSinglePartition() throws Exception {
    String dbName = "filterdb";
    String tblName = "filtertbl";

    List<String> vals = new ArrayList<String>(1);
    vals.add("p11");
    List<String> vals2 = new ArrayList<String>(1);
    vals2.add("p12");
    List<String> vals3 = new ArrayList<String>(1);
    vals3.add("p13");

    silentDropDatabase(dbName);

    Database db = new Database();
    db.setName(dbName);
    client.createDatabase(db);

    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
    cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, ""));
    cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, ""));

    ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
    partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));

    Table tbl = new Table();
    tbl.setDbName(dbName);
    tbl.setTableName(tblName);
    StorageDescriptor sd = new StorageDescriptor();
    tbl.setSd(sd);
    sd.setCols(cols);
    sd.setCompressed(false);
    sd.setNumBuckets(1);
    sd.setParameters(new HashMap<String, String>());
    sd.setBucketCols(new ArrayList<String>());
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters()
        .put(Constants.SERIALIZATION_FORMAT, "1");
    sd.setSortCols(new ArrayList<Order>());

    tbl.setPartitionKeys(partCols);
    client.createTable(tbl);

    tbl = client.getTable(dbName, tblName);

    add_partition(client, tbl, vals, "part1");
    add_partition(client, tbl, vals2, "part2");
    add_partition(client, tbl, vals3, "part3");

    checkFilter(client, dbName, tblName, "p1 = \"p12\"", 1);
    checkFilter(client, dbName, tblName, "p1 < \"p12\"", 1);
    checkFilter(client, dbName, tblName, "p1 > \"p12\"", 1);
    checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 2);
    checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 2);
    checkFilter(client, dbName, tblName, "p1 <> \"p12\"", 2);
    checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 3);
    checkFilter(client, dbName, tblName, "p1 like \"p.*2\"", 1);

    client.dropTable(dbName, tblName);
    client.dropDatabase(dbName);
  }

  /**
   * Test filtering based on the value of the last partition
   * @throws Exception
   */
  public void testFilterLastPartition() throws Exception {
    String dbName = "filterdb";
    String tblName = "filtertbl";

    List<String> vals = new ArrayList<String>(2);
    vals.add("p11");
    vals.add("p21");
    List<String> vals2 = new ArrayList<String>(2);
    vals2.add("p11");
    vals2.add("p22");
    List<String> vals3 = new ArrayList<String>(2);
    vals3.add("p12");
    vals3.add("p21");

    silentDropDatabase(dbName);

    Database db = new Database();
    db.setName(dbName);
    client.createDatabase(db);

    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
    cols.add(new FieldSchema("c1", Constants.STRING_TYPE_NAME, ""));
    cols.add(new FieldSchema("c2", Constants.INT_TYPE_NAME, ""));

    ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(2);
    partCols.add(new FieldSchema("p1", Constants.STRING_TYPE_NAME, ""));
    partCols.add(new FieldSchema("p2", Constants.STRING_TYPE_NAME, ""));

    Table tbl = new Table();
    tbl.setDbName(dbName);
    tbl.setTableName(tblName);
    StorageDescriptor sd = new StorageDescriptor();
    tbl.setSd(sd);
    sd.setCols(cols);
    sd.setCompressed(false);
    sd.setNumBuckets(1);
    sd.setParameters(new HashMap<String, String>());
    sd.setBucketCols(new ArrayList<String>());
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters()
        .put(Constants.SERIALIZATION_FORMAT, "1");
    sd.setSortCols(new ArrayList<Order>());

    tbl.setPartitionKeys(partCols);
    client.createTable(tbl);

    tbl = client.getTable(dbName, tblName);

    add_partition(client, tbl, vals, "part1");
    add_partition(client, tbl, vals2, "part2");
    add_partition(client, tbl, vals3, "part3");

    checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2);
    checkFilter(client, dbName, tblName, "p2 < \"p23\"", 3);
    checkFilter(client, dbName, tblName, "p2 > \"p21\"", 1);
    checkFilter(client, dbName, tblName, "p2 >= \"p21\"", 3);
    checkFilter(client, dbName, tblName, "p2 <= \"p21\"", 2);
    checkFilter(client, dbName, tblName, "p2 <> \"p12\"", 3);
    checkFilter(client, dbName, tblName, "p2 like \"p2.*\"", 3);
    checkFilter(client, dbName, tblName, "p2 like \"p.*2\"", 1);

    client.dropTable(dbName, tblName);
    client.dropDatabase(dbName);
  }

  private void checkFilter(HiveMetaStoreClient client, String dbName,
      String tblName, String filter, int expectedCount)
      throws MetaException, NoSuchObjectException, TException {
    List<Partition> partitions = client.listPartitionsByFilter(dbName,
        tblName, filter, (short) -1);
    assertEquals("Partition count expected for filter " + filter,
        expectedCount, partitions.size());
  }

  private void add_partition(HiveMetaStoreClient client, Table table,
      List<String> vals, String location) throws InvalidObjectException,
      AlreadyExistsException, MetaException, TException {
    Partition part = new Partition();
    part.setDbName(table.getDbName());
    part.setTableName(table.getTableName());
    part.setValues(vals);
    part.setParameters(new HashMap<String, String>());
    part.setSd(table.getSd());
    part.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
    part.getSd().setLocation(table.getSd().getLocation() + location);

    client.add_partition(part);
  }

  /**
   * Tests {@link HiveMetaStoreClient#newSynchronizedClient}. Does not
   * actually test multithreading, but does verify that the proxy
   * at least works correctly.
   */
  public void testSynchronized() throws Exception {
    IMetaStoreClient synchronizedClient =
        HiveMetaStoreClient.newSynchronizedClient(client);
    List<String> databases = synchronizedClient.getAllDatabases();
    assertEquals(1, databases.size());
  }
}
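
The test class above runs inside a harness that pre-creates the static client field. For standalone use, the client can be constructed directly from a HiveConf and pointed at a running metastore. Below is a minimal sketch assuming a metastore Thrift service is reachable at thrift://localhost:9083; the URI and the default.some_table table name are placeholders for illustration, not values from the test above.

import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;

public class MetaStoreClientDemo {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Point at a remote metastore; without this, the client starts an
    // embedded metastore based on the local hive-site.xml instead.
    conf.set("hive.metastore.uris", "thrift://localhost:9083"); // placeholder URI

    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // List all databases, as testSynchronized() does above.
      List<String> databases = client.getAllDatabases();
      System.out.println("databases: " + databases);

      // Fetch a table and inspect its storage descriptor.
      Table tbl = client.getTable("default", "some_table"); // placeholder table
      System.out.println("location: " + tbl.getSd().getLocation());

      // List partitions, then filter them server-side as checkFilter() does.
      List<Partition> parts =
          client.listPartitions("default", "some_table", (short) -1);
      System.out.println("partition count: " + parts.size());

      List<Partition> filtered = client.listPartitionsByFilter(
          "default", "some_table", "ds = \"2008-07-01\"", (short) -1);
      System.out.println("filtered count: " + filtered.size());
    } finally {
      client.close();
    }
  }
}

Note that HiveMetaStoreClient itself is not synchronized; for use from multiple threads, wrap it with HiveMetaStoreClient.newSynchronizedClient(client), as testSynchronized() demonstrates.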
