/*
 * Copyright 2016-2020 Ping Identity Corporation
 * All Rights Reserved.
 */
/*
 * Copyright 2016-2020 Ping Identity Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Copyright (C) 2016-2020 Ping Identity Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (GPLv2 only)
 * or the terms of the GNU Lesser General Public License (LGPLv2.1 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses>.
 */
package com.unboundid.ldap.sdk.unboundidds.tools;



import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.zip.GZIPOutputStream;

import com.unboundid.ldap.sdk.Filter;
import com.unboundid.ldap.sdk.LDAPException;
import com.unboundid.ldap.sdk.ResultCode;
import com.unboundid.ldap.sdk.Version;
import com.unboundid.ldap.sdk.schema.Schema;
import com.unboundid.ldif.LDIFException;
import com.unboundid.ldif.LDIFReader;
import com.unboundid.util.ByteStringBuffer;
import com.unboundid.util.CommandLineTool;
import com.unboundid.util.Debug;
import com.unboundid.util.ObjectPair;
import com.unboundid.util.PassphraseEncryptedOutputStream;
import com.unboundid.util.StaticUtils;
import com.unboundid.util.ThreadSafety;
import com.unboundid.util.ThreadSafetyLevel;
import com.unboundid.util.args.ArgumentException;
import com.unboundid.util.args.ArgumentParser;
import com.unboundid.util.args.BooleanArgument;
import com.unboundid.util.args.DNArgument;
import com.unboundid.util.args.FileArgument;
import com.unboundid.util.args.FilterArgument;
import com.unboundid.util.args.IntegerArgument;
import com.unboundid.util.args.StringArgument;
import com.unboundid.util.args.SubCommand;

import static com.unboundid.ldap.sdk.unboundidds.tools.ToolMessages.*;



/**
 * This class provides a command-line tool that can be used to split an LDIF
 * file below a specified base DN.  This can be used to help initialize an
 * entry-balancing deployment for use with the Directory Proxy Server.
 * <BR>
 * <BLOCKQUOTE>
 *   <B>NOTE:</B>  This class, and other classes within the
 *   {@code com.unboundid.ldap.sdk.unboundidds} package structure, are only
 *   supported for use against Ping Identity, UnboundID, and
 *   Nokia/Alcatel-Lucent 8661 server products.  These classes provide support
 *   for proprietary functionality or for external specifications that are not
 *   considered stable or mature enough to be guaranteed to work in an
 *   interoperable way with other types of LDAP servers.
 * </BLOCKQUOTE>
 * <BR>
 * It supports a number of algorithms for determining how to split the data,
 * including:
 * <UL>
 *   <LI>
 *     split-using-hash-on-rdn -- The tool will compute a digest of the DN
 *     component that is immediately below the split base DN, and will use a
 *     modulus to select a backend set for a given entry.  Since the split is
 *     based purely on computation involving the DN, there is no need for
 *     caching to ensure that children are placed in the same sets as their
 *     parent, which allows it to run effectively with a small memory
 *     footprint.
 *   </LI>
 *   <LI>
 *     split-using-hash-on-attribute -- The tool will compute a digest of the
 *     value(s) of a specified attribute, and will use a modulus to select a
 *     backend set for a given entry.  This hash will only be computed for
 *     entries immediately below the split base DN, and a cache will be used
 *     to ensure that entries more than one level below the split base DN are
 *     placed in the same backend set as their parent.
 *   </LI>
 *   <LI>
 *     split-using-fewest-entries -- When examining an entry immediately below
 *     the split base DN, the tool will place that entry in the set that has
 *     the fewest entries.  For flat DITs in which entries only exist one
 *     level below the split base DN, this will effectively ensure a
 *     round-robin distribution.  But for cases in which there are branches of
 *     varying sizes below the split base DN, this can help ensure that
 *     entries are more evenly distributed across backend sets.  A cache will
 *     be used to ensure that entries more than one level below the split base
 *     DN are placed in the same backend set as their parent.
 *   </LI>
 *   <LI>
 *     split-using-filter -- When examining an entry immediately below the
 *     split base DN, a series of filters will be evaluated against that
 *     entry, with each filter associated with a specific backend set.  If an
 *     entry doesn't match any of the provided filters, an RDN hash can be
 *     used to select the set.  A cache will be used to ensure that entries
 *     more than one level below the split base DN are placed in the same
 *     backend set as their parent.
 *   </LI>
 * </UL>
 */
@ThreadSafety(level=ThreadSafetyLevel.NOT_THREADSAFE)
public final class SplitLDIF
       extends CommandLineTool
{
  /**
   * The maximum length of any message to write to standard output or standard
   * error.
   */
  private static final int MAX_OUTPUT_LINE_LENGTH =
       StaticUtils.TERMINAL_WIDTH_COLUMNS - 1;
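


  // A typical invocation, mirroring the split-using-hash-on-rdn example that
  // this tool registers in addToolArguments() (the LDIF file names are
  // placeholders):
  //
  //   split-ldif split-using-hash-on-rdn \
  //        --sourceLDIF whole.ldif \
  //        --targetLDIFBasePath split.ldif \
  //        --splitBaseDN "ou=People,dc=example,dc=com" \
  //        --numSets 4 \
  //        --schemaPath config/schema \
  //        --addEntriesOutsideSplitBaseDNToAllSets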



  // The global arguments used by this tool.
  private BooleanArgument addEntriesOutsideSplitBaseDNToAllSets = null;
  private BooleanArgument addEntriesOutsideSplitBaseDNToDedicatedSet = null;
  private BooleanArgument compressTarget = null;
  private BooleanArgument encryptTarget = null;
  private BooleanArgument sourceCompressed = null;
  private DNArgument splitBaseDN = null;
  private FileArgument encryptionPassphraseFile = null;
  private FileArgument schemaPath = null;
  private FileArgument sourceLDIF = null;
  private FileArgument targetLDIFBasePath = null;
  private IntegerArgument numThreads = null;

  // The arguments used to split using a hash of the RDN.
  private IntegerArgument splitUsingHashOnRDNNumSets = null;
  private SubCommand splitUsingHashOnRDN = null;

  // The arguments used to split using a hash on a specified attribute.
  private BooleanArgument splitUsingHashOnAttributeAssumeFlatDIT = null;
  private BooleanArgument splitUsingHashOnAttributeUseAllValues = null;
  private IntegerArgument splitUsingHashOnAttributeNumSets = null;
  private StringArgument splitUsingHashOnAttributeAttributeName = null;
  private SubCommand splitUsingHashOnAttribute = null;

  // The arguments used to choose the set with the fewest entries.
  private BooleanArgument splitUsingFewestEntriesAssumeFlatDIT = null;
  private IntegerArgument splitUsingFewestEntriesNumSets = null;
  private SubCommand splitUsingFewestEntries = null;

  // The arguments used to choose the set using a provided set of filters.
  private BooleanArgument splitUsingFilterAssumeFlatDIT = null;
  private FilterArgument splitUsingFilterFilter = null;
  private SubCommand splitUsingFilter = null;



  /**
   * Runs the tool with the provided set of command-line arguments.
   *
   * @param  args  The command-line arguments provided to this tool.
   */
  public static void main(final String... args)
  {
    final ResultCode resultCode = main(System.out, System.err, args);
    if (resultCode != ResultCode.SUCCESS)
    {
      System.exit(resultCode.intValue());
    }
  }



  /**
   * Runs the tool with the provided set of command-line arguments.
   *
   * @param  out   The output stream used for standard output.  It may be
   *               {@code null} if standard output should be suppressed.
   * @param  err   The output stream used for standard error.  It may be
   *               {@code null} if standard error should be suppressed.
   * @param  args  The command-line arguments provided to this tool.
   *
   * @return  A result code with information about the processing performed.
   *          Any result code other than {@link ResultCode#SUCCESS} indicates
   *          that an error occurred.
   */
  public static ResultCode main(final OutputStream out, final OutputStream err,
                                final String... args)
  {
    final SplitLDIF tool = new SplitLDIF(out, err);
    return tool.runTool(args);
  }
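


  // A minimal sketch of invoking the tool programmatically rather than from
  // the command line, using the main(OutputStream, OutputStream, String...)
  // variant above.  The argument values are illustrative placeholders:
  //
  //   final ResultCode rc = SplitLDIF.main(System.out, System.err,
  //        "split-using-hash-on-rdn",
  //        "--sourceLDIF", "whole.ldif",
  //        "--targetLDIFBasePath", "split.ldif",
  //        "--splitBaseDN", "ou=People,dc=example,dc=com",
  //        "--numSets", "4");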



  /**
   * Creates a new instance of this tool with the provided information.
   *
   * @param  out  The output stream used for standard output.  It may be
   *              {@code null} if standard output should be suppressed.
   * @param  err  The output stream used for standard error.  It may be
   *              {@code null} if standard error should be suppressed.
   */
  public SplitLDIF(final OutputStream out, final OutputStream err)
  {
    super(out, err);
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public String getToolName()
  {
    return "split-ldif";
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public String getToolDescription()
  {
    return INFO_SPLIT_LDIF_TOOL_DESCRIPTION.get();
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public String getToolVersion()
  {
    return Version.NUMERIC_VERSION_STRING;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean supportsInteractiveMode()
  {
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean defaultsToInteractiveMode()
  {
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public boolean supportsPropertiesFile()
  {
    return true;
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void addToolArguments(final ArgumentParser parser)
         throws ArgumentException
  {
    // Add the global arguments.
    sourceLDIF = new FileArgument('l', "sourceLDIF", true, 0, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SOURCE_LDIF.get(), true, false, true,
         false);
    sourceLDIF.addLongIdentifier("inputLDIF", true);
    sourceLDIF.addLongIdentifier("source-ldif", true);
    sourceLDIF.addLongIdentifier("input-ldif", true);
    parser.addArgument(sourceLDIF);

    sourceCompressed = new BooleanArgument('C', "sourceCompressed",
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SOURCE_COMPRESSED.get());
    sourceCompressed.addLongIdentifier("inputCompressed", true);
    sourceCompressed.addLongIdentifier("source-compressed", true);
    sourceCompressed.addLongIdentifier("input-compressed", true);
    parser.addArgument(sourceCompressed);

    targetLDIFBasePath = new FileArgument('o', "targetLDIFBasePath", false, 1,
         null, INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_TARGET_LDIF_BASE.get(), false,
         true, true, false);
    targetLDIFBasePath.addLongIdentifier("outputLDIFBasePath", true);
    targetLDIFBasePath.addLongIdentifier("target-ldif-base-path", true);
    targetLDIFBasePath.addLongIdentifier("output-ldif-base-path", true);
    parser.addArgument(targetLDIFBasePath);

    compressTarget = new BooleanArgument('c', "compressTarget",
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_COMPRESS_TARGET.get());
    compressTarget.addLongIdentifier("compressOutput", true);
    compressTarget.addLongIdentifier("compress", true);
    compressTarget.addLongIdentifier("compress-target", true);
    compressTarget.addLongIdentifier("compress-output", true);
    parser.addArgument(compressTarget);

    encryptTarget = new BooleanArgument(null, "encryptTarget",
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_ENCRYPT_TARGET.get());
    encryptTarget.addLongIdentifier("encryptOutput", true);
    encryptTarget.addLongIdentifier("encrypt", true);
    encryptTarget.addLongIdentifier("encrypt-target", true);
    encryptTarget.addLongIdentifier("encrypt-output", true);
    parser.addArgument(encryptTarget);

    encryptionPassphraseFile = new FileArgument(null,
         "encryptionPassphraseFile", false, 1, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_ENCRYPT_PW_FILE.get(), true, true,
         true, false);
    encryptionPassphraseFile.addLongIdentifier("encryptionPasswordFile", true);
    encryptionPassphraseFile.addLongIdentifier("encryption-passphrase-file",
         true);
    encryptionPassphraseFile.addLongIdentifier("encryption-password-file",
         true);
    parser.addArgument(encryptionPassphraseFile);

    splitBaseDN = new DNArgument('b', "splitBaseDN", true, 1, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SPLIT_BASE_DN.get());
    splitBaseDN.addLongIdentifier("baseDN", true);
    splitBaseDN.addLongIdentifier("split-base-dn", true);
    splitBaseDN.addLongIdentifier("base-dn", true);
    parser.addArgument(splitBaseDN);

    addEntriesOutsideSplitBaseDNToAllSets = new BooleanArgument(null,
         "addEntriesOutsideSplitBaseDNToAllSets", 1,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_OUTSIDE_TO_ALL_SETS.get());
    addEntriesOutsideSplitBaseDNToAllSets.addLongIdentifier(
         "add-entries-outside-split-base-dn-to-all-sets", true);
    parser.addArgument(addEntriesOutsideSplitBaseDNToAllSets);

    addEntriesOutsideSplitBaseDNToDedicatedSet = new BooleanArgument(null,
         "addEntriesOutsideSplitBaseDNToDedicatedSet", 1,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_OUTSIDE_TO_DEDICATED_SET.get());
    addEntriesOutsideSplitBaseDNToDedicatedSet.addLongIdentifier(
         "add-entries-outside-split-base-dn-to-dedicated-set", true);
    parser.addArgument(addEntriesOutsideSplitBaseDNToDedicatedSet);

    schemaPath = new FileArgument(null, "schemaPath", false, 0, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_SCHEMA_PATH.get(), true, false, false,
         false);
    schemaPath.addLongIdentifier("schemaFile", true);
    schemaPath.addLongIdentifier("schemaDirectory", true);
    schemaPath.addLongIdentifier("schema-path", true);
    schemaPath.addLongIdentifier("schema-file", true);
    schemaPath.addLongIdentifier("schema-directory", true);
    parser.addArgument(schemaPath);

    numThreads = new IntegerArgument('t', "numThreads", false, 1, null,
         INFO_SPLIT_LDIF_GLOBAL_ARG_DESC_NUM_THREADS.get(), 1,
         Integer.MAX_VALUE, 1);
    numThreads.addLongIdentifier("num-threads", true);
    parser.addArgument(numThreads);


    // Add the subcommand used to split entries using a hash on the RDN.
    final ArgumentParser splitUsingHashOnRDNParser = new ArgumentParser(
         "split-using-hash-on-rdn", INFO_SPLIT_LDIF_SC_HASH_ON_RDN_DESC.get());

    splitUsingHashOnRDNNumSets = new IntegerArgument(null, "numSets", true, 1,
         null, INFO_SPLIT_LDIF_SC_HASH_ON_RDN_ARG_DESC_NUM_SETS.get(), 2,
         Integer.MAX_VALUE);
    splitUsingHashOnRDNNumSets.addLongIdentifier("num-sets", true);
    splitUsingHashOnRDNParser.addArgument(splitUsingHashOnRDNNumSets);

    final LinkedHashMap<String[],String> splitUsingHashOnRDNExamples =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(1));
    splitUsingHashOnRDNExamples.put(
         new String[]
         {
           "split-using-hash-on-rdn",
           "--sourceLDIF", "whole.ldif",
           "--targetLDIFBasePath", "split.ldif",
           "--splitBaseDN", "ou=People,dc=example,dc=com",
           "--numSets", "4",
           "--schemaPath", "config/schema",
           "--addEntriesOutsideSplitBaseDNToAllSets"
         },
         INFO_SPLIT_LDIF_SC_HASH_ON_RDN_EXAMPLE.get());

    splitUsingHashOnRDN = new SubCommand("split-using-hash-on-rdn",
         INFO_SPLIT_LDIF_SC_HASH_ON_RDN_DESC.get(), splitUsingHashOnRDNParser,
         splitUsingHashOnRDNExamples);
    splitUsingHashOnRDN.addName("hash-on-rdn", true);

    parser.addSubCommand(splitUsingHashOnRDN);
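
    // Conceptually, this subcommand selects a set roughly as in the following
    // sketch.  The actual digest computation lives in
    // SplitLDIFRDNHashTranslator (not in this file), so the details here are
    // illustrative only:
    //
    //   final int set = Math.abs(digestOf(normalizedRDN) % numSets);
    //
    // Because the choice depends only on the RDN component immediately below
    // the split base DN, which all descendants of an entry share, they land
    // in the same set without any caching.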


    // Add the subcommand used to split entries using a hash on a specified
    // attribute.
    final ArgumentParser splitUsingHashOnAttributeParser = new ArgumentParser(
         "split-using-hash-on-attribute",
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_DESC.get());

    splitUsingHashOnAttributeAttributeName = new StringArgument(null,
         "attributeName", true, 1, "{attr}",
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_ATTR_NAME.get());
    splitUsingHashOnAttributeAttributeName.addLongIdentifier("attribute-name",
         true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeAttributeName);

    splitUsingHashOnAttributeNumSets = new IntegerArgument(null, "numSets",
         true, 1, null, INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_NUM_SETS.get(),
         2, Integer.MAX_VALUE);
    splitUsingHashOnAttributeNumSets.addLongIdentifier("num-sets", true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeNumSets);

    splitUsingHashOnAttributeUseAllValues = new BooleanArgument(null,
         "useAllValues", 1,
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_ALL_VALUES.get());
    splitUsingHashOnAttributeUseAllValues.addLongIdentifier("use-all-values",
         true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeUseAllValues);

    splitUsingHashOnAttributeAssumeFlatDIT = new BooleanArgument(null,
         "assumeFlatDIT", 1,
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_ARG_DESC_ASSUME_FLAT_DIT.get());
    splitUsingHashOnAttributeAssumeFlatDIT.addLongIdentifier("assume-flat-dit",
         true);
    splitUsingHashOnAttributeParser.addArgument(
         splitUsingHashOnAttributeAssumeFlatDIT);

    final LinkedHashMap<String[],String> splitUsingHashOnAttributeExamples =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(1));
    splitUsingHashOnAttributeExamples.put(
         new String[]
         {
           "split-using-hash-on-attribute",
           "--sourceLDIF", "whole.ldif",
           "--targetLDIFBasePath", "split.ldif",
           "--splitBaseDN", "ou=People,dc=example,dc=com",
           "--attributeName", "uid",
           "--numSets", "4",
           "--schemaPath", "config/schema",
           "--addEntriesOutsideSplitBaseDNToAllSets"
         },
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_EXAMPLE.get());

    splitUsingHashOnAttribute = new SubCommand("split-using-hash-on-attribute",
         INFO_SPLIT_LDIF_SC_HASH_ON_ATTR_DESC.get(),
         splitUsingHashOnAttributeParser, splitUsingHashOnAttributeExamples);
    splitUsingHashOnAttribute.addName("hash-on-attribute", true);

    parser.addSubCommand(splitUsingHashOnAttribute);


    // Add the subcommand used to split entries by selecting the set with the
    // fewest entries.
    final ArgumentParser splitUsingFewestEntriesParser = new ArgumentParser(
         "split-using-fewest-entries",
         INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_DESC.get());

    splitUsingFewestEntriesNumSets = new IntegerArgument(null, "numSets",
         true, 1, null,
         INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_ARG_DESC_NUM_SETS.get(),
         2, Integer.MAX_VALUE);
    splitUsingFewestEntriesNumSets.addLongIdentifier("num-sets", true);
    splitUsingFewestEntriesParser.addArgument(splitUsingFewestEntriesNumSets);

    splitUsingFewestEntriesAssumeFlatDIT = new BooleanArgument(null,
         "assumeFlatDIT", 1,
         INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_ARG_DESC_ASSUME_FLAT_DIT.get());
    splitUsingFewestEntriesAssumeFlatDIT.addLongIdentifier("assume-flat-dit",
         true);
    splitUsingFewestEntriesParser.addArgument(
         splitUsingFewestEntriesAssumeFlatDIT);

    final LinkedHashMap<String[],String> splitUsingFewestEntriesExamples =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(1));
    splitUsingFewestEntriesExamples.put(
         new String[]
         {
           "split-using-fewest-entries",
           "--sourceLDIF", "whole.ldif",
           "--targetLDIFBasePath", "split.ldif",
           "--splitBaseDN", "ou=People,dc=example,dc=com",
           "--numSets", "4",
           "--schemaPath", "config/schema",
           "--addEntriesOutsideSplitBaseDNToAllSets"
         },
         INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_EXAMPLE.get());

    splitUsingFewestEntries = new SubCommand("split-using-fewest-entries",
         INFO_SPLIT_LDIF_SC_FEWEST_ENTRIES_DESC.get(),
         splitUsingFewestEntriesParser, splitUsingFewestEntriesExamples);
    splitUsingFewestEntries.addName("fewest-entries", true);

    parser.addSubCommand(splitUsingFewestEntries);


    // Add the subcommand used to split entries by selecting the set based on
    // a filter.
    final ArgumentParser splitUsingFilterParser = new ArgumentParser(
         "split-using-filter", INFO_SPLIT_LDIF_SC_FILTER_DESC.get());

    splitUsingFilterFilter = new FilterArgument(null, "filter", true, 0, null,
         INFO_SPLIT_LDIF_SC_FILTER_ARG_DESC_FILTER.get());
    splitUsingFilterParser.addArgument(splitUsingFilterFilter);

    splitUsingFilterAssumeFlatDIT = new BooleanArgument(null, "assumeFlatDIT",
         1, INFO_SPLIT_LDIF_SC_FILTER_ARG_DESC_ASSUME_FLAT_DIT.get());
    splitUsingFilterAssumeFlatDIT.addLongIdentifier("assume-flat-dit", true);
    splitUsingFilterParser.addArgument(splitUsingFilterAssumeFlatDIT);

    final LinkedHashMap<String[],String> splitUsingFilterExamples =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(1));
    splitUsingFilterExamples.put(
         new String[]
         {
           "split-using-filter",
           "--sourceLDIF", "whole.ldif",
           "--targetLDIFBasePath", "split.ldif",
           "--splitBaseDN", "ou=People,dc=example,dc=com",
           "--filter", "(timeZone=Eastern)",
           "--filter", "(timeZone=Central)",
           "--filter", "(timeZone=Mountain)",
           "--filter", "(timeZone=Pacific)",
           "--schemaPath", "config/schema",
           "--addEntriesOutsideSplitBaseDNToAllSets"
         },
         INFO_SPLIT_LDIF_SC_FILTER_EXAMPLE.get());

    splitUsingFilter = new SubCommand("split-using-filter",
         INFO_SPLIT_LDIF_SC_FILTER_DESC.get(),
         splitUsingFilterParser, splitUsingFilterExamples);
    splitUsingFilter.addName("filter", true);

    parser.addSubCommand(splitUsingFilter);
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public void doExtendedArgumentValidation()
         throws ArgumentException
  {
    // If multiple sourceLDIF values were provided, then a target LDIF base
    // path must have been given.
    final List<File> sourceLDIFValues = sourceLDIF.getValues();
    if (sourceLDIFValues.size() > 1)
    {
      if (! targetLDIFBasePath.isPresent())
      {
        throw new ArgumentException(ERR_SPLIT_LDIF_NO_TARGET_BASE_PATH.get(
             sourceLDIF.getIdentifierString(),
             targetLDIFBasePath.getIdentifierString()));
      }
    }


    // If the split-using-filter subcommand was provided, then at least two
    // filters must have been provided, and none of the filters can be
    // logically equivalent to any of the others.
    if (splitUsingFilter.isPresent())
    {
      final List<Filter> filterList = splitUsingFilterFilter.getValues();
      final Set<Filter> filterSet = new LinkedHashSet<>(
           StaticUtils.computeMapCapacity(filterList.size()));
      for (final Filter f : filterList)
      {
        if (filterSet.contains(f))
        {
          throw new ArgumentException(ERR_SPLIT_LDIF_NON_UNIQUE_FILTER.get(
               splitUsingFilterFilter.getIdentifierString(), f.toString()));
        }
        else
        {
          filterSet.add(f);
        }
      }

      if (filterSet.size() < 2)
      {
        throw new ArgumentException(ERR_SPLIT_LDIF_NOT_ENOUGH_FILTERS.get(
             splitUsingFilter.getPrimaryName(),
             splitUsingFilterFilter.getIdentifierString()));
      }
    }
  }
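
  // For example (the file names are illustrative), this invocation would be
  // rejected by the validation above because the same filter appears twice,
  // tripping the duplicate-filter check before the minimum-count check is
  // ever reached:
  //
  //   split-ldif split-using-filter \
  //        --sourceLDIF whole.ldif \
  //        --splitBaseDN "ou=People,dc=example,dc=com" \
  //        --filter "(st=TX)" \
  //        --filter "(st=TX)"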



  /**
   * {@inheritDoc}
   */
  @Override()
  public ResultCode doToolProcessing()
  {
    // Get the schema to use during processing.
    final Schema schema;
    try
    {
      schema = getSchema();
    }
    catch (final LDAPException le)
    {
      wrapErr(0, MAX_OUTPUT_LINE_LENGTH, le.getMessage());
      return le.getResultCode();
    }


    // If an encryption passphrase file is provided, then get the passphrase
    // from it.
    String encryptionPassphrase = null;
    if (encryptionPassphraseFile.isPresent())
    {
      try
      {
        encryptionPassphrase = ToolUtils.readEncryptionPassphraseFromFile(
             encryptionPassphraseFile.getValue());
      }
      catch (final LDAPException e)
      {
        Debug.debugException(e);
        wrapErr(0, MAX_OUTPUT_LINE_LENGTH, e.getMessage());
        return e.getResultCode();
      }
    }


    // Figure out which subcommand was selected, and create the appropriate
    // translator to use to perform the processing.
    final SplitLDIFTranslator translator;
    if (splitUsingHashOnRDN.isPresent())
    {
      translator = new SplitLDIFRDNHashTranslator(splitBaseDN.getValue(),
           splitUsingHashOnRDNNumSets.getValue(),
           addEntriesOutsideSplitBaseDNToAllSets.isPresent(),
           addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent());
    }
    else if (splitUsingHashOnAttribute.isPresent())
    {
      translator = new SplitLDIFAttributeHashTranslator(splitBaseDN.getValue(),
           splitUsingHashOnAttributeNumSets.getValue(),
           splitUsingHashOnAttributeAttributeName.getValue(),
           splitUsingHashOnAttributeUseAllValues.isPresent(),
           splitUsingHashOnAttributeAssumeFlatDIT.isPresent(),
           addEntriesOutsideSplitBaseDNToAllSets.isPresent(),
           addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent());
    }
    else if (splitUsingFewestEntries.isPresent())
    {
      translator = new SplitLDIFFewestEntriesTranslator(splitBaseDN.getValue(),
           splitUsingFewestEntriesNumSets.getValue(),
           splitUsingFewestEntriesAssumeFlatDIT.isPresent(),
           addEntriesOutsideSplitBaseDNToAllSets.isPresent(),
           addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent());
    }
    else if (splitUsingFilter.isPresent())
    {
      final List<Filter> filterList = splitUsingFilterFilter.getValues();
      final LinkedHashSet<Filter> filterSet = new LinkedHashSet<>(
           StaticUtils.computeMapCapacity(filterList.size()));
      for (final Filter f : filterList)
      {
        filterSet.add(f);
      }

      translator = new SplitLDIFFilterTranslator(splitBaseDN.getValue(),
           schema, filterSet, splitUsingFilterAssumeFlatDIT.isPresent(),
           addEntriesOutsideSplitBaseDNToAllSets.isPresent(),
           addEntriesOutsideSplitBaseDNToDedicatedSet.isPresent());
    }
    else
    {
      // This should never happen.
      wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
           ERR_SPLIT_LDIF_CANNOT_DETERMINE_SPLIT_ALGORITHM.get(
                splitUsingHashOnRDN.getPrimaryName() + ", " +
                splitUsingHashOnAttribute.getPrimaryName() + ", " +
                splitUsingFewestEntries.getPrimaryName() + ", " +
                splitUsingFilter.getPrimaryName()));
      return ResultCode.PARAM_ERROR;
    }
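
    // The translator plugs into the LDIFReader created below: when numThreads
    // is greater than one, the reader applies the translator on its parsing
    // threads, so each entry is tagged with its destination set(s) as it is
    // read.  Note that the LinkedHashSet above preserves the order in which
    // the --filter values were given, which is presumably how each filter is
    // associated with its backend set.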


    // Create the LDIF reader.
    final LDIFReader ldifReader;
    try
    {
      final InputStream inputStream;
      if (sourceLDIF.isPresent())
      {
        final ObjectPair<InputStream,String> p =
             ToolUtils.getInputStreamForLDIFFiles(sourceLDIF.getValues(),
                  encryptionPassphrase, getOut(), getErr());
        inputStream = p.getFirst();
        if ((encryptionPassphrase == null) && (p.getSecond() != null))
        {
          encryptionPassphrase = p.getSecond();
        }
      }
      else
      {
        inputStream = System.in;
      }

      ldifReader = new LDIFReader(inputStream, numThreads.getValue(),
           translator);
      if (schema != null)
      {
        ldifReader.setSchema(schema);
      }
    }
    catch (final Exception e)
    {
      Debug.debugException(e);
      wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
           ERR_SPLIT_LDIF_ERROR_CREATING_LDIF_READER.get(
                StaticUtils.getExceptionMessage(e)));
      return ResultCode.LOCAL_ERROR;
    }


    // Iterate through and process all of the entries.
    ResultCode resultCode = ResultCode.SUCCESS;
    final LinkedHashMap<String,OutputStream> outputStreams =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(10));
    try
    {
      final AtomicLong entriesRead = new AtomicLong(0L);
      final AtomicLong entriesExcluded = new AtomicLong(0L);
      final TreeMap<String,AtomicLong> fileCounts = new TreeMap<>();

readLoop:
      while (true)
      {
        final SplitLDIFEntry entry;
        try
        {
          entry = (SplitLDIFEntry) ldifReader.readEntry();
        }
        catch (final LDIFException le)
        {
          Debug.debugException(le);
          resultCode = ResultCode.LOCAL_ERROR;

          final File f = getOutputFile(SplitLDIFEntry.SET_NAME_ERRORS);
          OutputStream s = outputStreams.get(SplitLDIFEntry.SET_NAME_ERRORS);
          if (s == null)
          {
            try
            {
              s = new FileOutputStream(f);

              if (encryptTarget.isPresent())
              {
                if (encryptionPassphrase == null)
                {
                  try
                  {
                    encryptionPassphrase =
                         ToolUtils.promptForEncryptionPassphrase(false, true,
                              getOut(), getErr());
                  }
                  catch (final LDAPException ex)
                  {
                    Debug.debugException(ex);
                    wrapErr(0, MAX_OUTPUT_LINE_LENGTH, ex.getMessage());
                    return ex.getResultCode();
                  }
                }

                s = new PassphraseEncryptedOutputStream(encryptionPassphrase,
                     s);
              }

              if (compressTarget.isPresent())
              {
                s = new GZIPOutputStream(s);
              }

              outputStreams.put(SplitLDIFEntry.SET_NAME_ERRORS, s);
              fileCounts.put(SplitLDIFEntry.SET_NAME_ERRORS,
                   new AtomicLong(0L));
            }
            catch (final Exception e)
            {
              Debug.debugException(e);
              resultCode = ResultCode.LOCAL_ERROR;
              wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                   ERR_SPLIT_LDIF_CANNOT_OPEN_OUTPUT_FILE.get(
                        f.getAbsolutePath(),
                        StaticUtils.getExceptionMessage(e)));
              break readLoop;
            }
          }

          final ByteStringBuffer buffer = new ByteStringBuffer();
          buffer.append("# ");
          buffer.append(le.getMessage());
          buffer.append(StaticUtils.EOL_BYTES);

          final List<String> dataLines = le.getDataLines();
          if (dataLines != null)
          {
            for (final String dataLine : dataLines)
            {
              buffer.append(dataLine);
              buffer.append(StaticUtils.EOL_BYTES);
            }
          }

          buffer.append(StaticUtils.EOL_BYTES);
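
          // The record written to the errors file therefore looks like the
          // following (the contents shown are illustrative): the exception
          // message as an LDIF comment, then the raw unparseable lines, then
          // a blank separator line:
          //
          //   # <message describing the parse failure>
          //   <the raw lines of the rejected record>
          //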
          try
          {
            s.write(buffer.toByteArray());
          }
          catch (final Exception e)
          {
            Debug.debugException(e);
            resultCode = ResultCode.LOCAL_ERROR;
            wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                 ERR_SPLIT_LDIF_ERROR_WRITING_ERROR_TO_FILE.get(
                      le.getMessage(), f.getAbsolutePath(),
                      StaticUtils.getExceptionMessage(e)));
            break readLoop;
          }

          if (le.mayContinueReading())
          {
            wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                 ERR_SPLIT_LDIF_INVALID_LDIF_RECORD_RECOVERABLE.get(
                      StaticUtils.getExceptionMessage(le)));
            continue;
          }
          else
          {
            wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                 ERR_SPLIT_LDIF_INVALID_LDIF_RECORD_UNRECOVERABLE.get(
                      StaticUtils.getExceptionMessage(le)));
            break;
          }
        }
        catch (final IOException ioe)
        {
          Debug.debugException(ioe);
          resultCode = ResultCode.LOCAL_ERROR;
          wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
               ERR_SPLIT_LDIF_IO_READ_ERROR.get(
                    StaticUtils.getExceptionMessage(ioe)));
          break;
        }
        catch (final Exception e)
        {
          Debug.debugException(e);
          resultCode = ResultCode.LOCAL_ERROR;
          wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
               ERR_SPLIT_LDIF_UNEXPECTED_READ_ERROR.get(
                    StaticUtils.getExceptionMessage(e)));
          break;
        }

        if (entry == null)
        {
          break;
        }

        final long readCount = entriesRead.incrementAndGet();
        if ((readCount % 1000L) == 0)
        {
          // Even though we aren't done with this entry yet, we'll go ahead
          // and log a progress message now because it's easier to do that now
          // than to ensure that it's handled properly through all possible
          // error conditions that need to be handled below.
          wrapOut(0, MAX_OUTPUT_LINE_LENGTH,
               INFO_SPLIT_LDIF_PROGRESS.get(readCount));
        }


        // Get the set(s) to which the entry should be written.  If this is
        // null (which could be the case as a result of a race condition when
        // using multiple threads where processing for a child completes
        // before processing for its parent, or as a result of a case in which
        // a child is included without or before its parent), then try to see
        // if we can get the sets by passing the entry through the translator.
        Set<String> sets = entry.getSets();
        byte[] ldifBytes = entry.getLDIFBytes();
        if (sets == null)
        {
          try
          {
            sets = translator.translate(entry, 0L).getSets();
          }
          catch (final Exception e)
          {
            Debug.debugException(e);
          }

          if (sets == null)
          {
            final SplitLDIFEntry errorEntry = translator.createEntry(entry,
                 ERR_SPLIT_LDIF_ENTRY_WITHOUT_PARENT.get(
                      entry.getDN(), splitBaseDN.getStringValue()),
                 Collections.singleton(SplitLDIFEntry.SET_NAME_ERRORS));
            ldifBytes = errorEntry.getLDIFBytes();
            sets = errorEntry.getSets();
          }
        }


        // If the entry shouldn't be written into any sets, then we don't
        // need to do anything else.
        if (sets.isEmpty())
        {
          entriesExcluded.incrementAndGet();
          continue;
        }


        // Write the entry into each of the target sets, creating the output
        // files if necessary.
        for (final String set : sets)
        {
          if (set.equals(SplitLDIFEntry.SET_NAME_ERRORS))
          {
            // This indicates that an error was encountered during processing,
            // so we'll update the result code to reflect that.
            resultCode = ResultCode.LOCAL_ERROR;
          }

          final File f = getOutputFile(set);
          OutputStream s = outputStreams.get(set);
          if (s == null)
          {
            try
            {
              s = new FileOutputStream(f);

              if (encryptTarget.isPresent())
              {
                if (encryptionPassphrase == null)
                {
                  try
                  {
                    encryptionPassphrase =
                         ToolUtils.promptForEncryptionPassphrase(false, true,
                              getOut(), getErr());
                  }
                  catch (final LDAPException ex)
                  {
                    Debug.debugException(ex);
                    wrapErr(0, MAX_OUTPUT_LINE_LENGTH, ex.getMessage());
                    return ex.getResultCode();
                  }
                }

                s = new PassphraseEncryptedOutputStream(encryptionPassphrase,
                     s);
              }

              if (compressTarget.isPresent())
              {
                s = new GZIPOutputStream(s);
              }

              outputStreams.put(set, s);
              fileCounts.put(set, new AtomicLong(0L));
            }
            catch (final Exception e)
            {
              Debug.debugException(e);
              resultCode = ResultCode.LOCAL_ERROR;
              wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                   ERR_SPLIT_LDIF_CANNOT_OPEN_OUTPUT_FILE.get(
                        f.getAbsolutePath(),
                        StaticUtils.getExceptionMessage(e)));
              break readLoop;
            }
          }

          try
          {
            s.write(ldifBytes);
          }
          catch (final Exception e)
          {
            Debug.debugException(e);
            resultCode = ResultCode.LOCAL_ERROR;
            wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
                 ERR_SPLIT_LDIF_ERROR_WRITING_TO_FILE.get(
                      entry.getDN(), f.getAbsolutePath(),
                      StaticUtils.getExceptionMessage(e)));
            break readLoop;
          }

          fileCounts.get(set).incrementAndGet();
        }
      }


      // Processing is complete.  Summarize the processing that was performed.
      final long finalReadCount = entriesRead.get();
      if (finalReadCount > 1000L)
      {
        out();
      }

      wrapOut(0, MAX_OUTPUT_LINE_LENGTH,
           INFO_SPLIT_LDIF_PROCESSING_COMPLETE.get(finalReadCount));

      final long excludedCount = entriesExcluded.get();
      if (excludedCount > 0L)
      {
        wrapOut(0, MAX_OUTPUT_LINE_LENGTH,
             INFO_SPLIT_LDIF_EXCLUDED_COUNT.get(excludedCount));
      }

      for (final Map.Entry<String,AtomicLong> e : fileCounts.entrySet())
      {
        final File f = getOutputFile(e.getKey());
        wrapOut(0, MAX_OUTPUT_LINE_LENGTH,
             INFO_SPLIT_LDIF_COUNT_TO_FILE.get(e.getValue().get(),
                  f.getName()));
      }
    }
    finally
    {
      try
      {
        ldifReader.close();
      }
      catch (final Exception e)
      {
        Debug.debugException(e);
      }

      for (final Map.Entry<String,OutputStream> e : outputStreams.entrySet())
      {
        try
        {
          e.getValue().close();
        }
        catch (final Exception ex)
        {
          Debug.debugException(ex);
          resultCode = ResultCode.LOCAL_ERROR;
          wrapErr(0, MAX_OUTPUT_LINE_LENGTH,
               ERR_SPLIT_LDIF_ERROR_CLOSING_FILE.get(
                    getOutputFile(e.getKey()),
                    StaticUtils.getExceptionMessage(ex)));
        }
      }
    }

    return resultCode;
  }



  /**
   * Retrieves the schema that should be used for processing.
   *
   * @return  The schema that was created.
   *
   * @throws  LDAPException  If a problem is encountered while retrieving the
   *                         schema.
   */
  private Schema getSchema()
          throws LDAPException
  {
    // If any schema paths were specified, then load the schema only from
    // those paths.
    if (schemaPath.isPresent())
    {
      final ArrayList<File> schemaFiles = new ArrayList<>(10);
      for (final File path : schemaPath.getValues())
      {
        if (path.isFile())
        {
          schemaFiles.add(path);
        }
        else
        {
          final TreeMap<String,File> fileMap = new TreeMap<>();
          for (final File schemaDirFile : path.listFiles())
          {
            final String name = schemaDirFile.getName();
            if (schemaDirFile.isFile() && name.toLowerCase().endsWith(".ldif"))
            {
              fileMap.put(name, schemaDirFile);
            }
          }
          schemaFiles.addAll(fileMap.values());
        }
      }

      if (schemaFiles.isEmpty())
      {
        throw new LDAPException(ResultCode.PARAM_ERROR,
             ERR_SPLIT_LDIF_NO_SCHEMA_FILES.get(
                  schemaPath.getIdentifierString()));
      }
      else
      {
        try
        {
          return Schema.getSchema(schemaFiles);
        }
        catch (final Exception e)
        {
          Debug.debugException(e);
          throw new LDAPException(ResultCode.LOCAL_ERROR,
               ERR_SPLIT_LDIF_ERROR_LOADING_SCHEMA.get(
                    StaticUtils.getExceptionMessage(e)));
        }
      }
    }
    else
    {
      // If the INSTANCE_ROOT environment variable is set and it refers to a
      // directory that has a config/schema subdirectory that has one or more
      // schema files in it, then read the schema from that directory.
      try
      {
        final String instanceRootStr =
             StaticUtils.getEnvironmentVariable("INSTANCE_ROOT");
        if (instanceRootStr != null)
        {
          final File instanceRoot = new File(instanceRootStr);
          final File configDir = new File(instanceRoot, "config");
          final File schemaDir = new File(configDir, "schema");
          if (schemaDir.exists())
          {
            final TreeMap<String,File> fileMap = new TreeMap<>();
            for (final File schemaDirFile : schemaDir.listFiles())
            {
              final String name = schemaDirFile.getName();
              if (schemaDirFile.isFile() &&
                  name.toLowerCase().endsWith(".ldif"))
              {
                fileMap.put(name, schemaDirFile);
              }
            }

            if (! fileMap.isEmpty())
            {
              return Schema.getSchema(new ArrayList<>(fileMap.values()));
            }
          }
        }
      }
      catch (final Exception e)
      {
        Debug.debugException(e);
      }
    }


    // If we've gotten here, then just return null and the tool will try to
    // use the default standard schema.
    return null;
  }
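


  // The helper below derives each output file name by appending a
  // set-specific extension to the base path.  For example (the names are
  // illustrative): with --sourceLDIF whole.ldif, no --targetLDIFBasePath, and
  // a hypothetical set extension of ".set1", entries routed to that set would
  // be written to whole.ldif.set1 alongside the source file.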
  /**
   * Retrieves a file object that refers to an output file with the provided
   * extension.
   *
   * @param  extension  The extension to use for the file.
   *
   * @return  A file object that refers to an output file with the provided
   *          extension.
   */
  private File getOutputFile(final String extension)
  {
    final File baseFile;
    if (targetLDIFBasePath.isPresent())
    {
      baseFile = targetLDIFBasePath.getValue();
    }
    else
    {
      baseFile = sourceLDIF.getValue();
    }

    return new File(baseFile.getAbsolutePath() + extension);
  }



  /**
   * {@inheritDoc}
   */
  @Override()
  public LinkedHashMap<String[],String> getExampleUsages()
  {
    final LinkedHashMap<String[],String> exampleMap =
         new LinkedHashMap<>(StaticUtils.computeMapCapacity(4));

    for (final Map.Entry<String[],String> e :
         splitUsingHashOnRDN.getExampleUsages().entrySet())
    {
      exampleMap.put(e.getKey(), e.getValue());
    }

    for (final Map.Entry<String[],String> e :
         splitUsingHashOnAttribute.getExampleUsages().entrySet())
    {
      exampleMap.put(e.getKey(), e.getValue());
    }

    for (final Map.Entry<String[],String> e :
         splitUsingFewestEntries.getExampleUsages().entrySet())
    {
      exampleMap.put(e.getKey(), e.getValue());
    }

    for (final Map.Entry<String[],String> e :
         splitUsingFilter.getExampleUsages().entrySet())
    {
      exampleMap.put(e.getKey(), e.getValue());
    }

    return exampleMap;
  }
}