src/main/java/com/aws/amazonmq/blog/testcases/FIFO_Testcase_3.java [36:88]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
		String amazonMQSSLEndPoint = args[0]; // OpenWire SSL endpoint of the Amazon MQ broker
		String username = args[1]; // Username
		String password = args[2]; // Password
		String queueName = args[3]; // name of the Amazon MQ queue
		int numMsgs = Integer.parseInt(args[4]); // number of messages each producer sends
		String useCaseId = args[5]; // identifier appended to job names and passed to each job
		String prefetchSize = args[6]; // consumer prefetch size
		try {
			// Grab the Scheduler instance from the Factory and start it off
			Scheduler scheduler = StdSchedulerFactory.getDefaultScheduler();
			scheduler.start();
			// define producer jobs and tie them to MsgProducer_FIFO, which does the actual work
			JobDetail job1 = newJob(MsgProducer_FIFO.class).withIdentity(useCaseId.concat("-").concat("producer-job1"), "group1")
					.usingJobData("producerName", "producer_1")
					.usingJobData("queueName", queueName)
					.usingJobData("amazonMQSSLEndPoint", amazonMQSSLEndPoint)
					.usingJobData("username", username)
					.usingJobData("password", password)
					.usingJobData("useCaseId", useCaseId)
					.usingJobData("msgGroup", "Group-A")
					.usingJobData("numMsgs", numMsgs)
					.usingJobData("msgIdSequence", 250000)
					.usingJobData("msgPrefix", "A-").build();
			JobDetail job2 = newJob(MsgProducer_FIFO.class).withIdentity(useCaseId.concat("-").concat("producer-job2"), "group1")
					.usingJobData("producerName", "producer_2")
					.usingJobData("queueName", queueName)
					.usingJobData("amazonMQSSLEndPoint", amazonMQSSLEndPoint)
					.usingJobData("username", username)
					.usingJobData("password", password)
					.usingJobData("useCaseId", useCaseId)
					.usingJobData("msgGroup", "Group-B")
					.usingJobData("numMsgs", numMsgs)
					.usingJobData("msgIdSequence", 300000)
					.usingJobData("msgPrefix", "B-").build();
			JobDetail job3 = newJob(MsgProducer_FIFO.class).withIdentity(useCaseId.concat("-").concat("producer-job3"), "group1")
					.usingJobData("producerName", "producer_3")
					.usingJobData("queueName", queueName)
					.usingJobData("amazonMQSSLEndPoint", amazonMQSSLEndPoint)
					.usingJobData("username", username)
					.usingJobData("password", password)
					.usingJobData("useCaseId", useCaseId)
					.usingJobData("msgGroup", "Group-C")
					.usingJobData("numMsgs", numMsgs)
					.usingJobData("msgIdSequence", 350000)
					.usingJobData("msgPrefix", "C-").build();
			// define consumer jobs and tie them to MsgConsumer_CustomPrefetch, which does the actual work
			JobDetail job4 = newJob(MsgConsumer_CustomPrefetch.class).withIdentity(useCaseId.concat("-").concat("consumer-job1"), "group1")
					.usingJobData("consumerName", "consumer_1")
					.usingJobData("queueName", queueName)
					.usingJobData("amazonMQSSLEndPoint", amazonMQSSLEndPoint)
					.usingJobData("username", username)
					.usingJobData("password", password)
					.usingJobData("useCaseId", useCaseId)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
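
MsgProducer_FIFO itself lies outside the duplicated range, so the sketch below is only an assumption about how the "msgGroup" job data is used: the standard ActiveMQ approach to per-group FIFO is to stamp each message with the JMSXGroupID property so that every group (Group-A/B/C above) is delivered in order to a single consumer. Class and variable names not present in the excerpt are illustrative.

// Hedged sketch only: how a FIFO producer typically pins messages to a message group
// (JMSXGroupID). Argument parsing mirrors the excerpt above; everything else is assumed.
import javax.jms.Connection;
import javax.jms.MessageProducer;
import javax.jms.Session;
import javax.jms.TextMessage;
import org.apache.activemq.ActiveMQConnectionFactory;

public class FifoProducerSketch {
	public static void main(String[] args) throws Exception {
		String amazonMQSSLEndPoint = args[0];
		String username = args[1];
		String password = args[2];
		String queueName = args[3];
		int numMsgs = Integer.parseInt(args[4]);
		String msgGroup = "Group-A"; // plays the role of the "msgGroup" job data above

		ActiveMQConnectionFactory factory =
				new ActiveMQConnectionFactory(username, password, amazonMQSSLEndPoint);
		Connection connection = factory.createConnection();
		connection.start();
		Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
		MessageProducer producer = session.createProducer(session.createQueue(queueName));
		for (int i = 1; i <= numMsgs; i++) {
			TextMessage message = session.createTextMessage("A-" + i);
			message.setStringProperty("JMSXGroupID", msgGroup); // per-group FIFO ordering
			message.setIntProperty("JMSXGroupSeq", i);          // optional sequence within the group
			producer.send(message);
		}
		connection.close();
	}
}

With JMSXGroupID set, the broker guarantees ordering only within a group, which is consistent with the three producers above using distinct prefixes (A-, B-, C-) and starting sequence numbers.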



src/main/java/com/aws/amazonmq/blog/testcases/FIFO_Testcase_6.java [40:92]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
		String amazonMQSSLEndPoint = args[0]; // OpenWire SSL endpoint of the Amazon MQ broker
		String username = args[1]; // Username
		String password = args[2]; // Password
		String queueName = args[3]; // name of the Amazon MQ queue
		int numMsgs = Integer.parseInt(args[4]); // number of messages each producer sends
		String useCaseId = args[5]; // identifier appended to job names and passed to each job
		String prefetchSize = args[6]; // consumer prefetch size
		try {
			// Grab the Scheduler instance from the Factory and start it off
			Scheduler scheduler = StdSchedulerFactory.getDefaultScheduler();
			scheduler.start();
			// define producer jobs and tie them to MsgProducer_FIFO, which does the actual work
			JobDetail job1 = newJob(MsgProducer_FIFO.class).withIdentity(useCaseId.concat("-").concat("producer-job1"), "group1")
					.usingJobData("producerName", "producer_1")
					.usingJobData("queueName", queueName)
					.usingJobData("amazonMQSSLEndPoint", amazonMQSSLEndPoint)
					.usingJobData("username", username)
					.usingJobData("password", password)
					.usingJobData("useCaseId", useCaseId)
					.usingJobData("msgGroup", "Group-A")
					.usingJobData("numMsgs", numMsgs)
					.usingJobData("msgIdSequence", 250000)
					.usingJobData("msgPrefix", "A-").build();
			JobDetail job2 = newJob(MsgProducer_FIFO.class).withIdentity(useCaseId.concat("-").concat("producer-job2"), "group1")
					.usingJobData("producerName", "producer_2")
					.usingJobData("queueName", queueName)
					.usingJobData("amazonMQSSLEndPoint", amazonMQSSLEndPoint)
					.usingJobData("username", username)
					.usingJobData("password", password)
					.usingJobData("useCaseId", useCaseId)
					.usingJobData("msgGroup", "Group-B")
					.usingJobData("numMsgs", numMsgs)
					.usingJobData("msgIdSequence", 300000)
					.usingJobData("msgPrefix", "B-").build();
			JobDetail job3 = newJob(MsgProducer_FIFO.class).withIdentity(useCaseId.concat("-").concat("producer-job3"), "group1")
					.usingJobData("producerName", "producer_3")
					.usingJobData("queueName", queueName)
					.usingJobData("amazonMQSSLEndPoint", amazonMQSSLEndPoint)
					.usingJobData("username", username)
					.usingJobData("password", password)
					.usingJobData("useCaseId", useCaseId)
					.usingJobData("msgGroup", "Group-C")
					.usingJobData("numMsgs", numMsgs)
					.usingJobData("msgIdSequence", 350000)
					.usingJobData("msgPrefix", "C-").build();
			// define consumer jobs and tie them to MsgConsumer_CustomPrefetch, which does the actual work
			JobDetail job4 = newJob(MsgConsumer_CustomPrefetch.class).withIdentity(useCaseId.concat("-").concat("consumer-job1"), "group1")
					.usingJobData("consumerName", "consumer_1")
					.usingJobData("queueName", queueName)
					.usingJobData("amazonMQSSLEndPoint", amazonMQSSLEndPoint)
					.usingJobData("username", username)
					.usingJobData("password", password)
					.usingJobData("useCaseId", useCaseId)
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
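
Both excerpts end before the consumer jobs are fully defined, and MsgConsumer_CustomPrefetch is not shown, so how the prefetchSize argument is applied is an assumption. The sketch below shows the two usual ActiveMQ mechanisms for a custom prefetch, via the connection factory's prefetch policy or a per-destination option; names not present in the excerpts are illustrative.

// Hedged sketch only: two standard ActiveMQ ways to apply a custom prefetch size,
// presumably what MsgConsumer_CustomPrefetch does with the prefetchSize argument.
import javax.jms.Connection;
import javax.jms.MessageConsumer;
import javax.jms.Session;
import org.apache.activemq.ActiveMQConnectionFactory;

public class CustomPrefetchConsumerSketch {
	public static void main(String[] args) throws Exception {
		String amazonMQSSLEndPoint = args[0];
		String username = args[1];
		String password = args[2];
		String queueName = args[3];
		int prefetchSize = Integer.parseInt(args[4]);

		ActiveMQConnectionFactory factory =
				new ActiveMQConnectionFactory(username, password, amazonMQSSLEndPoint);
		// Option 1: set the queue prefetch on the connection factory's prefetch policy.
		factory.getPrefetchPolicy().setQueuePrefetch(prefetchSize);

		Connection connection = factory.createConnection();
		connection.start();
		Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);

		// Option 2: override the prefetch for this destination only, via a destination option.
		MessageConsumer consumer = session.createConsumer(
				session.createQueue(queueName + "?consumer.prefetchSize=" + prefetchSize));

		System.out.println("Received: " + consumer.receive(5000));
		connection.close();
	}
}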



