Mirror of https://github.com/Telecominfraproject/wlan-cloud-base.git (synced 2025-11-02 03:17:52 +00:00)

Commit: added more base components
@@ -42,6 +42,17 @@
        <module>../cloud-metrics</module>
        <module>../base-container</module>
        <module>../base-client</module>
        <module>../base-datastore-inmemory</module>
        <module>../base-hazelcast-client</module>
        <module>../base-hierarchical-datastore</module>
        <module>../base-jdbc</module>
        <module>../base-jdbc-tests</module>
        <module>../base-job</module>
        <module>../base-partitioner</module>
        <module>../base-remote-tests</module>
        <module>../base-scheduler</module>
        <module>../base-stream-interface</module>
        <module>../common-hazelcast</module>
      </modules>
    </profile>
  </profiles>

base-datastore-inmemory/pom.xml (new file, 21 lines added)
@@ -0,0 +1,21 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>com.telecominfraproject.wlan</groupId>
        <artifactId>tip-wlan-cloud-root-pom</artifactId>
        <version>0.0.1-SNAPSHOT</version>
        <relativePath>../../tip-wlan-cloud-root</relativePath>
    </parent>
    <artifactId>base-datastore-inmemory</artifactId>
    <name>base-datastore-inmemory</name>
    <description>Common classes used by in-memory data sources - mainly in unit testing.</description>
    <dependencies>
        <dependency>
            <groupId>com.telecominfraproject.wlan</groupId>
            <artifactId>base-exceptions</artifactId>
            <version>${tip-wlan-cloud.release.version}</version>
        </dependency>
    </dependencies>
</project>

@@ -0,0 +1,53 @@
/**
 *
 */
package com.telecominfraproject.wlan.datastore.inmemory;

import java.util.concurrent.TimeUnit;

import com.telecominfraproject.wlan.server.exceptions.GenericErrorException;

/**
 * @author yongli
 */
public abstract class BaseInMemoryDatastore {

    /**
     * Wait up to 5 seconds to catch up with the current last-modified timestamp
     */
    private static final long NEXT_LASTMOD_WAIT_THRESHOLD = 5;

    /**
     * Constructor
     */
    protected BaseInMemoryDatastore() {
    }

    /**
     * Create a new last-modified timestamp based on the current one.
     *
     * @param currentLastModTs existing last-modified timestamp
     * @return new last-modified timestamp, guaranteed to be greater than the current one
     */
    public static long getNewLastModTs(long currentLastModTs) {
        long result = System.currentTimeMillis();
        while (result <= currentLastModTs) {
            long diff = currentLastModTs - result;
            if (diff > TimeUnit.SECONDS.toMillis(NEXT_LASTMOD_WAIT_THRESHOLD)) {
                throw new IllegalArgumentException("Existing last modified TS is in the future");
            }
            if (diff > 0) {
                // pause until we have a time greater than the current lastMod
                try {
                    Thread.sleep(diff + 1);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new GenericErrorException("Unable to generate the new last modified TS", e);
                }
            }
            result = System.currentTimeMillis();
        }
        return result;
    }
}
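
getNewLastModTs() is the hook concrete in-memory stores use when bumping a record's last-modified timestamp. Below is a minimal sketch of that pattern, assuming an optimistic-concurrency style of update; ExampleRecord, its fields, and the update() method are hypothetical and not part of this commit — only the call to BaseInMemoryDatastore.getNewLastModTs() comes from the code above.

// Hypothetical sketch (not part of this commit): how an in-memory datastore
// might use getNewLastModTs() to bump timestamps and reject stale updates.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import com.telecominfraproject.wlan.datastore.inmemory.BaseInMemoryDatastore;

class ExampleRecord {
    long id;
    long lastModifiedTimestamp;
    String payload;
}

class ExampleInMemoryDatastore extends BaseInMemoryDatastore {
    private final Map<Long, ExampleRecord> records = new ConcurrentHashMap<>();

    public ExampleRecord update(ExampleRecord incoming) {
        ExampleRecord existing = records.get(incoming.id);
        if (existing == null) {
            throw new IllegalStateException("Record not found: " + incoming.id);
        }
        // reject the update if the caller's copy is stale
        if (existing.lastModifiedTimestamp != incoming.lastModifiedTimestamp) {
            throw new IllegalStateException("Concurrent modification detected for " + incoming.id);
        }
        // getNewLastModTs() returns a strictly increasing timestamp,
        // waiting up to 5 seconds if the clock has not advanced yet
        incoming.lastModifiedTimestamp = getNewLastModTs(existing.lastModifiedTimestamp);
        records.put(incoming.id, incoming);
        return incoming;
    }
}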
base-hazelcast-client/pom.xml (new file, 36 lines added)
@@ -0,0 +1,36 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>com.telecominfraproject.wlan</groupId>
        <artifactId>tip-wlan-cloud-root-pom</artifactId>
        <version>0.0.1-SNAPSHOT</version>
        <relativePath>../../tip-wlan-cloud-root</relativePath>
    </parent>
    <artifactId>base-hazelcast-client</artifactId>
    <name>base-hazelcast-client</name>
    <description>Common classes for accessing Hazelcast in-memory grid data store.</description>
    <dependencies>
        <dependency>
            <groupId>com.hazelcast</groupId>
            <artifactId>hazelcast</artifactId>
        </dependency>
        <dependency>
            <groupId>com.hazelcast</groupId>
            <artifactId>hazelcast-client</artifactId>
        </dependency>
        <dependency>
            <groupId>com.telecominfraproject.wlan</groupId>
            <artifactId>common-hazelcast</artifactId>
            <version>${tip-wlan-cloud.release.version}</version>
        </dependency>
        <dependency>
            <groupId>com.telecominfraproject.wlan</groupId>
            <artifactId>base-models</artifactId>
            <version>${tip-wlan-cloud.release.version}</version>
        </dependency>
    </dependencies>
</project>

@@ -0,0 +1,104 @@
package com.telecominfraproject.wlan.hazelcast;

import java.util.HashSet;
import java.util.Set;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Profile;

import com.hazelcast.config.Config;
import com.hazelcast.config.GroupConfig;
import com.hazelcast.config.InterfacesConfig;
import com.hazelcast.config.JoinConfig;
import com.hazelcast.config.MapConfig;
import com.hazelcast.config.MulticastConfig;
import com.hazelcast.config.QueueConfig;
import com.hazelcast.config.TcpIpConfig;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.spi.properties.GroupProperty;

@Configuration
@Profile("HazelcastForUnitTest")
public class HazelcastForUnitTest {

    private static final Logger LOG = LoggerFactory.getLogger(HazelcastForUnitTest.class);

    /**
     * Use this to register instances for shutdown. Each test case should have
     * its own HazelcastInstance and register it with the Manager.
     *
     * @author yongli
     */
    public static class HazelcastUnitTestManager {
        final Set<String> instanceSet = new HashSet<>();

        public void registerInstance(HazelcastInstance hazelcastInstance) {
            this.instanceSet.add(hazelcastInstance.getName());
        }

        public void shutdownAllInstances() {
            for (String instanceName : instanceSet) {
                HazelcastInstance instance = Hazelcast.getHazelcastInstanceByName(instanceName);
                if (null != instance) {
                    instance.shutdown();
                }
            }
            instanceSet.clear();
        }

        /**
         * Set system properties such that the test case will have its own
         * HazelcastInstance.
         *
         * @param testClass test class whose hash code is used to build a unique group name/password
         */
        public static void initializeSystemProperty(Class<?> testClass) {
            int hashCode = testClass.hashCode();
            System.setProperty("whizcontrol.hazelcast.groupName", "wc-dev" + hashCode);
            System.setProperty("whizcontrol.hazelcast.groupPassword", "wc-dev-pass" + hashCode);
        }
    }

    @Bean
    public HazelcastInstance hazelcastInstanceTest() {
        // this is used for experiments and unit tests
        Config config = new Config();
        config.setProperty(GroupProperty.LOGGING_TYPE.getName(), "slf4j");
        config.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");

        GroupConfig groupConfig = new GroupConfig(System.getProperty("whizcontrol.hazelcast.groupName", "wc-dev"));
        groupConfig.setPassword(System.getProperty("whizcontrol.hazelcast.groupPassword", "wc-dev-pass"));
        config.setGroupConfig(groupConfig);
        config.getNetworkConfig().setPublicAddress("127.0.0.1").setPort(5900).setPortAutoIncrement(true)
                .setInterfaces(new InterfacesConfig().addInterface("127.0.0.1"));

        // note: this join config (multicast off, tcp-ip on) is prepared here but is not
        // applied to config.getNetworkConfig() in this commit
        JoinConfig joinCfg = new JoinConfig();
        joinCfg.setMulticastConfig(new MulticastConfig().setEnabled(false));
        joinCfg.setTcpIpConfig(new TcpIpConfig().setEnabled(true));

        MapConfig mapConfigWildcard = new MapConfig();
        mapConfigWildcard.setName("ree-*").setBackupCount(0).setTimeToLiveSeconds(10 * 60);
        config.addMapConfig(mapConfigWildcard);
        mapConfigWildcard = new MapConfig();
        mapConfigWildcard.setName("se-*").setBackupCount(0).setTimeToLiveSeconds(10 * 60);
        config.addMapConfig(mapConfigWildcard);
        mapConfigWildcard = new MapConfig();
        mapConfigWildcard.setName("sm-*").setBackupCount(0).setTimeToLiveSeconds(10 * 60);
        config.addMapConfig(mapConfigWildcard);

        QueueConfig queueConfig = new QueueConfig();
        queueConfig.setName("commands-q-*").setBackupCount(0).setMaxSize(5000);
        config.addQueueConfig(queueConfig);

        HazelcastInstance hazelcastInstance = Hazelcast.newHazelcastInstance(config);

        LOG.info("Configured Hazelcast Instance {} for unit tests", hazelcastInstance.getName());
        return hazelcastInstance;
    }
}
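
A test case is expected to call initializeSystemProperty() before creating its instance, register the instance with HazelcastUnitTestManager, and shut everything down afterwards. The following is a hedged sketch of that flow; it assumes JUnit 4, the ExampleHazelcastTest class name is hypothetical, and the factory method is called directly for brevity (in a Spring-based test the bean would normally come from the application context via the HazelcastForUnitTest profile).

// Hypothetical sketch (not part of this commit): JUnit 4 test using
// HazelcastForUnitTest and HazelcastUnitTestManager for setup/teardown.
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import com.hazelcast.core.HazelcastInstance;
import com.telecominfraproject.wlan.hazelcast.HazelcastForUnitTest;
import com.telecominfraproject.wlan.hazelcast.HazelcastForUnitTest.HazelcastUnitTestManager;

public class ExampleHazelcastTest {

    private static final HazelcastUnitTestManager testManager = new HazelcastUnitTestManager();
    private static HazelcastInstance hazelcastInstance;

    @BeforeClass
    public static void setup() {
        // give this test class its own group name/password so it does not
        // join clusters started by other concurrently running tests
        HazelcastUnitTestManager.initializeSystemProperty(ExampleHazelcastTest.class);
        hazelcastInstance = new HazelcastForUnitTest().hazelcastInstanceTest();
        testManager.registerInstance(hazelcastInstance);
    }

    @AfterClass
    public static void teardown() {
        testManager.shutdownAllInstances();
    }

    @Test
    public void mapRoundTrip() {
        // "ree-*" maps are configured above with a 10-minute TTL and no backups
        hazelcastInstance.getMap("ree-test").put(1L, "value");
        // assertions would go here
    }
}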
@@ -0,0 +1,57 @@
package com.telecominfraproject.wlan.hazelcast.client;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.hazelcast.core.DistributedObject;
import com.hazelcast.core.DistributedObjectEvent;
import com.hazelcast.core.DistributedObjectListener;
import com.hazelcast.core.HazelcastInstance;

/**
 * @author dtop
 * This class cleans up locally-known distributed objects when their remote counterparts get destroyed
 */
public class ClientDistributedObjectListener implements DistributedObjectListener {

    private static final Logger LOG = LoggerFactory.getLogger(ClientDistributedObjectListener.class);

    private final HazelcastInstance hazelcastInstance;

    private static final boolean propagateHazelcastDestroyEvents = Boolean.getBoolean("whizcontrol.hazelcast.propagateDestroyEvents");

    /**
     * This listener cleans up locally-known distributed objects when their remote counterparts get destroyed
     * @param hazelcastInstance client instance whose local proxies should be cleaned up
     */
    public ClientDistributedObjectListener(HazelcastInstance hazelcastInstance) {
        this.hazelcastInstance = hazelcastInstance;
    }

    @Override
    public void distributedObjectDestroyed(DistributedObjectEvent event) {
        LOG.info("Object destroyed {} : {}", event.getServiceName(), event.getObjectName());

        if (propagateHazelcastDestroyEvents) {
            // IMPORTANT: this was causing an infinite loop of destroy/create events when one of the
            // clients re-created the queue before all of the clients were done destroying it.
            // This needs to be better understood and rewritten before enabling in production.
            String serviceName = event.getServiceName();
            String name = (String) event.getObjectName();
            // Need to iterate through the whole list,
            // cannot just call hazelcastInstance.getDistributedObject(service, name)
            // because that would implicitly create a new distributed object
            for (DistributedObject distObj : hazelcastInstance.getDistributedObjects()) {
                if (distObj.getServiceName().equals(serviceName) && distObj.getName().equals(name)) {
                    distObj.destroy();
                }
            }
        }
    }

    @Override
    public void distributedObjectCreated(DistributedObjectEvent event) {
        LOG.info("Object created {} : {}", event.getServiceName(), event.getObjectName());
    }
}
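
The listener is registered on a client instance; ReConnectingHazelcastClient.newHazelcastClient() further below does exactly this for every connection it opens. A minimal stand-alone sketch (the ListenerWiringExample class name is hypothetical):

// Hypothetical sketch (not part of this commit): registering the listener on a
// Hazelcast client so destroyed remote objects are also dropped locally.
import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.core.HazelcastInstance;
import com.telecominfraproject.wlan.hazelcast.client.ClientDistributedObjectListener;

public class ListenerWiringExample {
    public static HazelcastInstance connect(ClientConfig clientConfig) {
        HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
        // propagation of destroy events is gated behind the
        // whizcontrol.hazelcast.propagateDestroyEvents system property
        client.addDistributedObjectListener(new ClientDistributedObjectListener(client));
        return client;
    }
}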
@@ -0,0 +1,169 @@
package com.telecominfraproject.wlan.hazelcast.client;

import java.io.IOException;

import javax.annotation.PostConstruct;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Profile;
import org.springframework.core.env.Environment;

import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.spi.properties.GroupProperty;
import com.telecominfraproject.wlan.core.model.utils.SystemAndEnvPropertyResolver;

/**
 * @author dtop
 */
@Configuration
@Profile("use-hazelcast-client")
public class HazelcastClientConfiguration {

    private static final Logger LOG = LoggerFactory.getLogger(HazelcastClientConfiguration.class);

    private static String defaultAwsRegion;

    @Autowired
    private Environment environment;

    //
    // hazelcast group security settings
    //
    @Value("${whizcontrol.hazelcast.groupName:wc-dev}")
    private String groupName;
    @Value("${whizcontrol.hazelcast.groupPassword:wc-dev-pass}")
    private String groupPassword;

    /**
     * comma-separated list of ipAddr:port of some of the nodes in the hazelcast cluster
     */
    @Value("${whizcontrol.hazelcast.nodeAddresses:127.0.0.1:5701}")
    private String nodeAddressesStr;

    @Value("${whizcontrol.hazelcast.awsStackName:hcn01}")
    private String stackName;

    @Value("${whizcontrol.hazelcast.reconnectTimeSec:3600}")
    private int reconnectTimeSec;

    private String awsRegion;

    /**
     * Default constructor, used when the Spring framework is active
     */
    public HazelcastClientConfiguration() {
    }

    @PostConstruct
    public void setupConfiguration() {
        this.awsRegion = environment.getProperty("whizcontrol.hazelcast.awsRegion");
    }

    /**
     * This constructor is for use outside of the Spring framework, e.g. in EMR
     * Spark applications. This is the configuration for an AWS auto-discovery
     * Hazelcast instance.
     *
     * @param groupName
     * @param groupPassword
     * @param stackName
     * @param awsRegion
     */
    public HazelcastClientConfiguration(String groupName, String groupPassword, String stackName, String awsRegion) {
        this.groupName = groupName;
        this.groupPassword = groupPassword;
        this.nodeAddressesStr = null;
        this.stackName = stackName;
        this.reconnectTimeSec = 3600;
        this.awsRegion = awsRegion;
    }

    /**
     * This constructor is for use outside of the Spring framework, e.g. in EMR
     * Spark applications. This is the configuration for a unicast Hazelcast instance.
     *
     * @param groupName
     * @param groupPassword
     * @param nodeAddressesStr
     */
    public HazelcastClientConfiguration(String groupName, String groupPassword, String nodeAddressesStr) {
        this.groupName = groupName;
        this.groupPassword = groupPassword;
        this.nodeAddressesStr = nodeAddressesStr;
        this.reconnectTimeSec = 3600;
        this.stackName = null;
        this.awsRegion = null;
    }

    /**
     * @return HazelcastClientConfiguration constructed from System Properties
     *         or Environment Variables
     */
    public static HazelcastClientConfiguration createOutsideOfSpringApp() {
        String nodeAddressesStr = SystemAndEnvPropertyResolver
                .getPropertyAsString("whizcontrol.hazelcast.nodeAddresses", null);
        String groupPassword = SystemAndEnvPropertyResolver.getPropertyAsString("whizcontrol.hazelcast.groupPassword",
                "wc-dev-pass");
        String groupName = SystemAndEnvPropertyResolver.getPropertyAsString("whizcontrol.hazelcast.groupName",
                "wc-dev");
        if (nodeAddressesStr == null) {
            String stackName = SystemAndEnvPropertyResolver.getPropertyAsString("whizcontrol.hazelcast.awsStackName",
                    "hcn01");

            String awsRegion = SystemAndEnvPropertyResolver
                    .getPropertyAsString("whizcontrol.hazelcast.awsRegion", null);
            return new HazelcastClientConfiguration(groupName, groupPassword, stackName, awsRegion);
        }
        return new HazelcastClientConfiguration(groupName, groupPassword, nodeAddressesStr);
    }

    /**
     * Create a HazelcastInstance based on the configuration. If
     * {@link #nodeAddressesStr} is not null, it will use
     * {@link #hazelcastClientUnicast()}. Otherwise it will use
     * {@link #hazelcastInstanceAwsDiscovery()}.
     *
     * @return created hazelcastInstance
     * @throws IOException
     */
    public HazelcastInstance createHazelcastInstance() throws IOException {
        return hazelcastClientUnicast();
    }

    @Profile("!hazelcast-aws-discovery")
    @Bean
    public HazelcastInstance hazelcastClientUnicast() {
        ClientConfig clientConfig = new ClientConfig();
        clientConfig.setProperty(GroupProperty.LOGGING_TYPE.getName(), "slf4j");
        clientConfig.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");

        clientConfig.getGroupConfig().setName(groupName).setPassword(groupPassword);
        for (String addrStr : nodeAddressesStr.split(",")) {
            clientConfig.getNetworkConfig().addAddress(addrStr);
        }
        // see
        // http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
        // here we're using a "dumb" client that connects only to a single node of the cluster
        clientConfig.getNetworkConfig().setSmartRouting(false);
        // the client will attempt to re-connect to the cluster forever if the
        // cluster is not available
        clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);

        HazelcastInstance client = new ReConnectingHazelcastClient(clientConfig, reconnectTimeSec);

        LOG.info("Configured Hazelcast client for cluster {}", nodeAddressesStr);

        return client;
    }

}
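
For callers outside of Spring (the EMR/Spark case mentioned in the constructor javadoc), the factory methods above can be combined as in the following hedged sketch. The NonSpringClientExample class and the property values shown in the comments are illustrative only; the whizcontrol.hazelcast.* property names and the createOutsideOfSpringApp()/createHazelcastInstance() calls come from the code above.

// Hypothetical sketch (not part of this commit): obtaining a Hazelcast client
// outside of Spring via HazelcastClientConfiguration.
import java.io.IOException;

import com.hazelcast.core.HazelcastInstance;
import com.telecominfraproject.wlan.hazelcast.client.HazelcastClientConfiguration;

public class NonSpringClientExample {
    public static void main(String[] args) throws IOException {
        // resolved from system properties or environment variables, e.g.
        //   -Dwhizcontrol.hazelcast.nodeAddresses=10.0.0.5:5701,10.0.0.6:5701
        //   -Dwhizcontrol.hazelcast.groupName=wc-dev
        //   -Dwhizcontrol.hazelcast.groupPassword=wc-dev-pass
        HazelcastClientConfiguration cfg = HazelcastClientConfiguration.createOutsideOfSpringApp();
        HazelcastInstance client = cfg.createHazelcastInstance();
        try {
            client.getMap("testMap").put("key", "value");
        } finally {
            client.shutdown();
        }
    }
}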
@@ -0,0 +1,22 @@
package com.telecominfraproject.wlan.hazelcast.client;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Profile;

import com.hazelcast.core.HazelcastInstance;

/**
 * @author erik
 */
@Configuration
@Profile("use-hazelcast-empty")
public class HazelcastClientConfigurationEmpty {

    @Bean
    HazelcastInstance hazelcastClient() {
        return null;
    }

}
@@ -0,0 +1,152 @@
package com.telecominfraproject.wlan.hazelcast.client;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import com.hazelcast.core.DistributedObject;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.map.impl.MapService;
import com.telecominfraproject.wlan.core.model.filter.EntryFilter;
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
import com.telecominfraproject.wlan.core.model.json.interfaces.HasProducedTimestamp;

public class HazelcastUtils {

    public static <T extends BaseJsonModel> List<T> getModelsFromHazelcastByMapPrefix(HazelcastInstance hazelcastInstance, String mapPrefix, long fromTime, long toTime, EntryFilter<T> entryFilter) {
        if (hazelcastInstance == null) {
            return Collections.emptyList();
        }

        // Need to be more specific with the map names.
        // Without a delimiter a request for se_1 will pick up se_1_1, se_1_2 (which is fine),
        // but it will also pick up se_10_1, se_114_3 (which is wrong)
        String mapPrefixWithDelimiter = mapPrefix + "_";

        List<String> matchingMaps = new ArrayList<>();
        for (DistributedObject distObj : hazelcastInstance.getDistributedObjects()) {
            if (distObj.getServiceName().equals(MapService.SERVICE_NAME) &&
                    (distObj.getName().equals(mapPrefix) || distObj.getName().startsWith(mapPrefixWithDelimiter))) {
                matchingMaps.add(distObj.getName());
            }
        }

        List<T> ret = new ArrayList<>();

        for (String mapName : matchingMaps) {
            ret.addAll(getModelsFromHazelcast(hazelcastInstance, mapName, fromTime, toTime, entryFilter));
        }

        return ret;
    }

    public static <T extends BaseJsonModel> List<T> getModelsFromHazelcast(HazelcastInstance hazelcastInstance, String mapName, long fromTime, long toTime, EntryFilter<T> entryFilter) {
        if (hazelcastInstance == null) {
            return Collections.emptyList();
        }

        // if the hazelcast datagrid is configured, retrieve records from it
        Map<Long, byte[]> reeMap = hazelcastInstance.getMap(mapName);

        List<T> ret = new ArrayList<>();
        Iterator<Map.Entry<Long, byte[]>> iterator = reeMap.entrySet().iterator();
        Map.Entry<Long, byte[]> entry;
        while (iterator.hasNext()) {
            entry = iterator.next();
            @SuppressWarnings("unchecked")
            T ree = (T) BaseJsonModel.fromZippedBytes(entry.getValue(), BaseJsonModel.class);

            if (ree instanceof HasProducedTimestamp) {
                HasProducedTimestamp record = (HasProducedTimestamp) ree;
                if (record.getProducedTimestampMs() >= fromTime && record.getProducedTimestampMs() <= toTime) {
                    T filteredRee = entryFilter.getFilteredEntry(ree);
                    if (filteredRee != null) {
                        ret.add(filteredRee);
                    }
                }
            }
        }

        return ret;
    }

    public static <T extends BaseJsonModel> int countModels(HazelcastInstance hazelcastInstance, String mapName, long fromTime, long toTime, EntryFilter<T> entryFilter) {
        return getModelsFromHazelcast(hazelcastInstance, mapName, fromTime, toTime, entryFilter).size();
    }

    public static <T extends BaseJsonModel> int countModelsByMapPrefix(HazelcastInstance hazelcastInstance, String mapPrefix, long fromTime, long toTime, EntryFilter<T> entryFilter) {
        return getModelsFromHazelcastByMapPrefix(hazelcastInstance, mapPrefix, fromTime, toTime, entryFilter).size();
    }

    /**
     * @param list
     * @return max timestamp from the supplied list, or -1 if the list is empty/null
     */
    public static <T extends HasProducedTimestamp> long getMaxTimestamp(List<T> list) {
        long ret = -1;

        if (list == null) {
            return ret;
        }

        for (T item : list) {
            if (item.getProducedTimestampMs() > ret) {
                ret = item.getProducedTimestampMs();
            }
        }

        return ret;
    }

    /**
     * Copies items from the source list into the destination list, but only those that have a
     * timestamp greater than the max timestamp of the original destination list.
     * This is to deal with duplicate records (records with the same timestamp) that are present
     * in both S3 and Hazelcast results.
     *
     * @param destination
     * @param source
     */
    public static <T extends HasProducedTimestamp> void combineLists(List<T> destination, List<T> source) {
        long maxTs = getMaxTimestamp(destination);

        Set<T> overlappingItems = new HashSet<>();
        // collect items from the destination that have maxTs - we'll use them to detect duplicates when merging lists
        for (T item : destination) {
            if (item.getProducedTimestampMs() == maxTs) {
                overlappingItems.add(item);
            }
        }

        for (T item : source) {
            if (item.getProducedTimestampMs() > maxTs || (item.getProducedTimestampMs() == maxTs && !overlappingItems.contains(item))) {
                destination.add(item);
            }
        }
    }

    /**
     * Build a datagrid map name using the supplied mapPrefix, customerId and equipmentId.
     *
     * @param mapPrefix
     * @param customerId
     * @param equipmentId
     * @return map name in the form mapPrefix + customerId + "_" + equipmentId
     */
    public static String getDatagridMapName(String mapPrefix, int customerId, long equipmentId) {
        return getDatagridMapCustomerPrefix(mapPrefix, customerId) + "_" + equipmentId;
    }

    /**
     * Build a datagrid map name prefix using the supplied mapPrefix and customerId.
     *
     * @param mapPrefix
     * @param customerId
     * @return map name prefix in the form mapPrefix + customerId
     */
    public static String getDatagridMapCustomerPrefix(String mapPrefix, int customerId) {
        return mapPrefix + customerId;
    }

}
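
Putting the pieces together, a caller can combine the map-name helpers with getModelsFromHazelcastByMapPrefix() to pull time-bounded records for one customer across all of its per-equipment maps. The sketch below is a hedged illustration: the HazelcastUtilsExample class is hypothetical, and it assumes EntryFilter exposes only the getFilteredEntry() method used in the code above; only the HazelcastUtils calls themselves come from this commit.

// Hypothetical sketch (not part of this commit): query the last hour of model
// records across all maps named <mapPrefix><customerId>_<equipmentId>.
import java.util.List;
import java.util.concurrent.TimeUnit;

import com.hazelcast.core.HazelcastInstance;
import com.telecominfraproject.wlan.core.model.filter.EntryFilter;
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
import com.telecominfraproject.wlan.core.model.json.interfaces.HasProducedTimestamp;
import com.telecominfraproject.wlan.hazelcast.client.HazelcastUtils;

public class HazelcastUtilsExample {

    public static <T extends BaseJsonModel & HasProducedTimestamp> List<T> lastHourForCustomer(
            HazelcastInstance hazelcastInstance, String mapPrefix, int customerId) {
        long toTime = System.currentTimeMillis();
        long fromTime = toTime - TimeUnit.HOURS.toMillis(1);

        // e.g. mapPrefix "sm-" + customerId selects maps named sm-<customerId>_<equipmentId>
        String customerPrefix = HazelcastUtils.getDatagridMapCustomerPrefix(mapPrefix, customerId);

        // pass-through filter; a real filter could drop or redact entries by returning null
        EntryFilter<T> passThrough = new EntryFilter<T>() {
            @Override
            public T getFilteredEntry(T entry) {
                return entry;
            }
        };

        return HazelcastUtils.getModelsFromHazelcastByMapPrefix(hazelcastInstance, customerPrefix,
                fromTime, toTime, passThrough);
    }
}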
@@ -0,0 +1,373 @@
package com.telecominfraproject.wlan.hazelcast.client;

import java.util.Collection;
import java.util.concurrent.ConcurrentMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.hazelcast.cardinality.CardinalityEstimator;
import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.config.Config;
import com.hazelcast.core.ClientService;
import com.hazelcast.core.Cluster;
import com.hazelcast.core.DistributedObject;
import com.hazelcast.core.DistributedObjectListener;
import com.hazelcast.core.Endpoint;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IAtomicLong;
import com.hazelcast.core.IAtomicReference;
import com.hazelcast.core.ICacheManager;
import com.hazelcast.core.ICountDownLatch;
import com.hazelcast.core.IExecutorService;
import com.hazelcast.core.IList;
import com.hazelcast.core.ILock;
import com.hazelcast.core.IMap;
import com.hazelcast.core.IQueue;
import com.hazelcast.core.ISemaphore;
import com.hazelcast.core.ISet;
import com.hazelcast.core.ITopic;
import com.hazelcast.core.IdGenerator;
import com.hazelcast.core.LifecycleService;
import com.hazelcast.core.MultiMap;
import com.hazelcast.core.PartitionService;
import com.hazelcast.core.ReplicatedMap;
import com.hazelcast.cp.CPSubsystem;
import com.hazelcast.crdt.pncounter.PNCounter;
import com.hazelcast.durableexecutor.DurableExecutorService;
import com.hazelcast.flakeidgen.FlakeIdGenerator;
import com.hazelcast.logging.LoggingService;
import com.hazelcast.mapreduce.JobTracker;
import com.hazelcast.quorum.QuorumService;
import com.hazelcast.ringbuffer.Ringbuffer;
import com.hazelcast.scheduledexecutor.IScheduledExecutorService;
import com.hazelcast.transaction.HazelcastXAResource;
import com.hazelcast.transaction.TransactionContext;
import com.hazelcast.transaction.TransactionException;
import com.hazelcast.transaction.TransactionOptions;
import com.hazelcast.transaction.TransactionalTask;

/**
 * Wrapper for the HazelcastClient that reconnects to the HC cluster at regular
 * intervals to spread the load across HC nodes
 *
 * @author dtop
 */
public class ReConnectingHazelcastClient implements HazelcastInstance {

    private static final Logger LOG = LoggerFactory.getLogger(ReConnectingHazelcastClient.class);

    private final ClientConfig clientConfig;

    private HazelcastInstance client;
    private boolean isTimeToReConnect;

    public ReConnectingHazelcastClient(ClientConfig clientConfig, int reconnectTimeSec) {
        this.clientConfig = clientConfig;
        this.client = newHazelcastClient();

        if (reconnectTimeSec > 0) {
            Thread thr = new Thread(new Runnable() {
                @Override
                public void run() {
                    while (true) {
                        try {
                            Thread.sleep(1000L * reconnectTimeSec);
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                            break;
                        }

                        synchronized (ReConnectingHazelcastClient.this) {
                            isTimeToReConnect = true;
                        }
                    }
                }
            }, "HC-Reconnect-kicker-thread");
            thr.setDaemon(true);
            thr.start();
        }
    }

    private HazelcastInstance newHazelcastClient() {
        HazelcastInstance c = HazelcastClient.newHazelcastClient(clientConfig);
        c.addDistributedObjectListener(new ClientDistributedObjectListener(c));
        return c;
    }

    private synchronized HazelcastInstance getClient() {
        if (isTimeToReConnect) {
            final HazelcastInstance oldClient = this.client;
            LOG.info("Re-connecting hazelcast client for load balancing across nodes");
            this.client = newHazelcastClient();
            Thread thr = new Thread(new Runnable() {
                @Override
                public void run() {
                    // wait a bit to ensure that in-flight operations have a chance to complete before shutting down the old client
                    try {
                        Thread.sleep(300000);
                    } catch (InterruptedException e) {
                        // do nothing
                        Thread.currentThread().interrupt();
                    }
                    oldClient.shutdown();
                }
            }, "HazelcastClient-shutdown-old-client-thread");

            thr.setDaemon(true);
            thr.start();

            isTimeToReConnect = false;
        }

        if (this.client == null || !this.client.getLifecycleService().isRunning()) {
            // the whole cluster went down and the client has been terminated; we'll attempt to re-create the client to connect again
            HazelcastInstance oldClient = this.client;
            if (oldClient != null) {
                oldClient.getLifecycleService().terminate();
            }

            for (int i = 0; i < Integer.getInteger("com.telecominfraproject.wlan.hazelcast.client.maxReconnectAttempts", 3000); i++) {
                try {
                    Thread.sleep(Integer.getInteger("com.telecominfraproject.wlan.hazelcast.client.sleepBeforeReconnectMs", 1000));
                } catch (InterruptedException e) {
                    // do nothing
                    Thread.currentThread().interrupt();
                }

                LOG.warn("Re-connecting hazelcast client because the old one got disconnected");
                try {
                    this.client = newHazelcastClient();
                    break;
                } catch (Exception e) {
                    LOG.error("Client could not connect");
                }
            }

        }
        return this.client;
    }

    @Override
    public String getName() {
        return getClient().getName();
    }

    @Override
    public <E> IQueue<E> getQueue(String name) {
        return getClient().getQueue(name);
    }

    @Override
    public <E> ITopic<E> getTopic(String name) {
        return getClient().getTopic(name);
    }

    @Override
    public <E> ISet<E> getSet(String name) {
        return getClient().getSet(name);
    }

    @Override
    public <E> IList<E> getList(String name) {
        return getClient().getList(name);
    }

    @Override
    public <K, V> IMap<K, V> getMap(String name) {
        return getClient().getMap(name);
    }

    @Override
    public <K, V> ReplicatedMap<K, V> getReplicatedMap(String name) {
        return getClient().getReplicatedMap(name);
    }

    @Override
    public JobTracker getJobTracker(String name) {
        return getClient().getJobTracker(name);
    }

    @Override
    public <K, V> MultiMap<K, V> getMultiMap(String name) {
        return getClient().getMultiMap(name);
    }

    @Override
    public ILock getLock(String key) {
        return getClient().getLock(key);
    }

    @Override
    public <E> Ringbuffer<E> getRingbuffer(String name) {
        return getClient().getRingbuffer(name);
    }

    @Override
    public <E> ITopic<E> getReliableTopic(String name) {
        return getClient().getReliableTopic(name);
    }

    @Override
    public Cluster getCluster() {
        return getClient().getCluster();
    }

    @Override
    public Endpoint getLocalEndpoint() {
        return getClient().getLocalEndpoint();
    }

    @Override
    public IExecutorService getExecutorService(String name) {
        return getClient().getExecutorService(name);
    }

    @Override
    public <T> T executeTransaction(TransactionalTask<T> task) throws TransactionException {
        return getClient().executeTransaction(task);
    }

    @Override
    public <T> T executeTransaction(TransactionOptions options, TransactionalTask<T> task) throws TransactionException {
        return getClient().executeTransaction(options, task);
    }

    @Override
    public TransactionContext newTransactionContext() {
        return getClient().newTransactionContext();
    }

    @Override
    public TransactionContext newTransactionContext(TransactionOptions options) {
        return getClient().newTransactionContext(options);
    }

    @Override
    public IdGenerator getIdGenerator(String name) {
        return getClient().getIdGenerator(name);
    }

    @Override
    public IAtomicLong getAtomicLong(String name) {
        return getClient().getAtomicLong(name);
    }

    @Override
    public <E> IAtomicReference<E> getAtomicReference(String name) {
        return getClient().getAtomicReference(name);
    }

    @Override
    public ICountDownLatch getCountDownLatch(String name) {
        return getClient().getCountDownLatch(name);
    }

    @Override
    public ISemaphore getSemaphore(String name) {
        return getClient().getSemaphore(name);
    }

    @Override
    public Collection<DistributedObject> getDistributedObjects() {
        return getClient().getDistributedObjects();
    }

    @Override
    public String addDistributedObjectListener(DistributedObjectListener distributedObjectListener) {
        return getClient().addDistributedObjectListener(distributedObjectListener);
    }

    @Override
    public boolean removeDistributedObjectListener(String registrationId) {
        return getClient().removeDistributedObjectListener(registrationId);
    }

    @Override
    public Config getConfig() {
        return getClient().getConfig();
    }

    @Override
    public PartitionService getPartitionService() {
        return getClient().getPartitionService();
    }

    @Override
    public QuorumService getQuorumService() {
        return getClient().getQuorumService();
    }

    @Override
    public ClientService getClientService() {
        return getClient().getClientService();
    }

    @Override
    public LoggingService getLoggingService() {
        return getClient().getLoggingService();
    }

    @Override
    public LifecycleService getLifecycleService() {
        return getClient().getLifecycleService();
    }

    @Override
    public <T extends DistributedObject> T getDistributedObject(String serviceName, String name) {
        return getClient().getDistributedObject(serviceName, name);
    }

    @Override
    public ConcurrentMap<String, Object> getUserContext() {
        return getClient().getUserContext();
    }

    @Override
    public HazelcastXAResource getXAResource() {
        return getClient().getXAResource();
    }

    @Override
    public void shutdown() {
        getClient().shutdown();
    }

    @Override
    public DurableExecutorService getDurableExecutorService(String name) {
        return getClient().getDurableExecutorService(name);
    }

    @Override
    public ICacheManager getCacheManager() {
        return getClient().getCacheManager();
    }

    @Override
    public CardinalityEstimator getCardinalityEstimator(String name) {
        return getClient().getCardinalityEstimator(name);
    }

    @Override
    public IScheduledExecutorService getScheduledExecutorService(String name) {
        return getClient().getScheduledExecutorService(name);
    }

    @Override
    public PNCounter getPNCounter(String name) {
        return getClient().getPNCounter(name);
    }

    @Override
    public CPSubsystem getCPSubsystem() {
        return getClient().getCPSubsystem();
    }

    @Override
    public FlakeIdGenerator getFlakeIdGenerator(String name) {
        return getClient().getFlakeIdGenerator(name);
    }
}
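
Although HazelcastClientConfiguration.hazelcastClientUnicast() above is the normal way to obtain this wrapper, it can also be constructed directly. The following hedged sketch (the ReconnectingClientExample class is hypothetical) shows that path; the constructor signature, the ClientConfig calls, and the two tuning system properties are taken from the code above.

// Hypothetical sketch (not part of this commit): constructing the re-connecting
// client directly. reconnectTimeSec > 0 makes the wrapper switch to a fresh
// connection at that interval; 0 disables periodic reconnects.
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.core.HazelcastInstance;
import com.telecominfraproject.wlan.hazelcast.client.ReConnectingHazelcastClient;

public class ReconnectingClientExample {
    public static HazelcastInstance connect(String groupName, String groupPassword, String address) {
        ClientConfig clientConfig = new ClientConfig();
        clientConfig.getGroupConfig().setName(groupName).setPassword(groupPassword);
        clientConfig.getNetworkConfig().addAddress(address);
        // retry forever if the cluster is unavailable, mirroring hazelcastClientUnicast()
        clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);

        // recovery after a full cluster outage can be tuned via the
        // com.telecominfraproject.wlan.hazelcast.client.maxReconnectAttempts and
        // com.telecominfraproject.wlan.hazelcast.client.sleepBeforeReconnectMs system properties
        return new ReConnectingHazelcastClient(clientConfig, 3600);
    }
}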
@@ -0,0 +1,647 @@
package com.telecominfraproject.wlan.hazelcast.client;

import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.collection.impl.queue.QueueService;
import com.hazelcast.core.DistributedObject;
import com.hazelcast.core.DistributedObjectEvent;
import com.hazelcast.core.DistributedObjectListener;
import com.hazelcast.core.EntryEvent;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.core.IQueue;
import com.hazelcast.core.IdGenerator;
import com.hazelcast.map.listener.EntryAddedListener;
import com.hazelcast.map.listener.EntryEvictedListener;
import com.hazelcast.map.listener.EntryRemovedListener;
import com.hazelcast.query.Predicate;
import com.telecominfraproject.wlan.hazelcast.common.SamplePredicate;

public class TestClient {

    public static void main_1(String[] args) throws IOException {
        ClientConfig clientConfig = new ClientConfig();
        clientConfig.getGroupConfig().setName("wc-dev").setPassword("wc-dev-pass");
        clientConfig.getNetworkConfig().addAddress("127.0.0.1:5900");
        clientConfig.getNetworkConfig().addAddress("127.0.0.1:5901");
        clientConfig.getNetworkConfig().addAddress("127.0.0.1:5902");

        // see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
        // here we're using a "dumb" client that connects only to a single node of the cluster
        clientConfig.getNetworkConfig().setSmartRouting(false);

        HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);

        Map<Integer, String> metricsPerDevice = client.getMap("metricsPerDevice");

        System.out.println("metricsMap: " + metricsPerDevice);
        for (Map.Entry<Integer, String> entry : metricsPerDevice.entrySet()) {
            System.out.println("metricsMap[" + entry.getKey() + "]: " + entry.getValue());
        }

        IdGenerator testIdGenerator = client.getIdGenerator("id_generator_created_from_client");

        Map<Long, String> mapCreatedFromClient = client.getMap("map_created_from_client");
        System.out.println("mapCreatedFromClient: " + mapCreatedFromClient);
        mapCreatedFromClient.put(testIdGenerator.newId(), Long.toString(System.currentTimeMillis()));
        for (Map.Entry<Long, String> entry : mapCreatedFromClient.entrySet()) {
            System.out.println("mapCreatedFromClient[" + entry.getKey() + "]: " + entry.getValue());
        }

        Map<Long, String> mapDynamicName = client.getMap("metricsPerDevice-with-expiration-" + 8);
        long lv = testIdGenerator.newId();
        mapDynamicName.put(lv, "value-" + lv);
        System.out.println("mapDynamicName: " + mapDynamicName);
        for (Map.Entry<Long, String> entry : mapDynamicName.entrySet()) {
            System.out.println("mapDynamicName[" + entry.getKey() + "]: " + entry.getValue());
        }

        System.out.println("Press Enter to terminate the client");
        System.in.read();
        System.exit(0);
    }

    private static final String clusterName = System.getProperty("wc.hazelcast.clusterName", "wc-dev");
    private static final String password = System.getProperty("wc.hazelcast.clusterPassword", "wc-dev-pass");
    private static final String addr = System.getProperty("wc.hazelcast.clusterAddr", "127.0.0.1:5900");

private static class TestMapListener implements EntryAddedListener<String, String>,
|
||||
EntryRemovedListener<String, String>,
|
||||
EntryEvictedListener<String, String>, Serializable {
|
||||
|
||||
private static final long serialVersionUID = -1669018710360608086L;
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(TestMapListener.class);
|
||||
|
||||
@Override
|
||||
public void entryEvicted(EntryEvent<String, String> event) {
|
||||
LOG.info( "{} Entry Evicted: {}", event.getSource(), event.getKey() );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void entryRemoved(EntryEvent<String, String> event) {
|
||||
LOG.info( "{} Entry Removed: {}", event.getSource(), event.getKey() );
|
||||
}
|
||||
|
||||
@Override
|
||||
public void entryAdded(EntryEvent<String, String> event) {
|
||||
LOG.info( "{} Entry Added: {}", event.getSource(), event.getKey() );
|
||||
}
|
||||
|
||||
|
||||
//This class is a singleton, all instances are meant to be equal
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
return obj instanceof TestMapListener;
|
||||
}
|
||||
}
|
||||
|
||||
public static void main_2(String[] args) throws InterruptedException {
|
||||
ClientConfig clientConfig = new ClientConfig();
|
||||
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
|
||||
clientConfig.getNetworkConfig().addAddress(addr);
|
||||
clientConfig.getNetworkConfig().addAddress("127.0.0.1:5901");
|
||||
clientConfig.getNetworkConfig().addAddress("127.0.0.1:5902");
|
||||
|
||||
//see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
|
||||
// here we're using "dumb" client that connects only to a single node of the cluster
|
||||
clientConfig.getNetworkConfig().setSmartRouting(false);
|
||||
|
||||
HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
|
||||
|
||||
//String[] mapNames = {"s3CreationTs-sm-", "s3CreationTs-sm_x5m-", "s3CreationTs-sm_x15m-"};
|
||||
String[] mapNames = {"testMap"};
|
||||
|
||||
for(String mName: mapNames){
|
||||
IMap<String, String> map = client.getMap( mName );
|
||||
|
||||
map.addEntryListener(new TestMapListener(), true);
|
||||
|
||||
map.put("t1", "v1", 15, TimeUnit.SECONDS);
|
||||
map.put("t2", "v2", 20, TimeUnit.SECONDS);
|
||||
map.put("t3", "v3");
|
||||
map.put("t4", "v4");
|
||||
System.out.println("Map: " + mName+ " size = "+ map.size());
|
||||
|
||||
for(Map.Entry<String,String> entry: map.entrySet()){
|
||||
System.out.format("%s -> %s %n", entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
System.out.println("Waiting 3...");
|
||||
Thread.sleep(TimeUnit.SECONDS.toMillis(3L));
|
||||
for(String mName: mapNames){
|
||||
IMap<String,String> map = client.getMap( mName );
|
||||
map.put("t3", "v3");
|
||||
System.out.println("Map: " + mName+ " size = "+ map.size());
|
||||
for(Map.Entry<String,String> entry: map.entrySet()){
|
||||
System.out.format("%s -> %s %n", entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
System.out.println("Waiting 7...");
|
||||
Thread.sleep(TimeUnit.SECONDS.toMillis(7L));
|
||||
|
||||
for(String mName: mapNames){
|
||||
IMap<String,String> map = client.getMap( mName );
|
||||
map.put("t1", "v1", 15, TimeUnit.SECONDS);
|
||||
System.out.println("Map: " + mName+ " size = "+ map.size());
|
||||
for(Map.Entry<String,String> entry: map.entrySet()){
|
||||
System.out.format("%s -> %s %n", entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
System.out.println("Waiting 10...");
|
||||
Thread.sleep(TimeUnit.SECONDS.toMillis(10L));
|
||||
|
||||
for(String mName: mapNames){
|
||||
Map<String,String> map = client.getMap( mName );
|
||||
System.out.println("Map: " + mName+ " size = "+ map.size());
|
||||
for(Map.Entry<String,String> entry: map.entrySet()){
|
||||
System.out.format("%s -> %s %n", entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
System.out.println("Waiting 10...");
|
||||
Thread.sleep(TimeUnit.SECONDS.toMillis(10L));
|
||||
|
||||
for(String mName: mapNames){
|
||||
Map<String,String> map = client.getMap( mName );
|
||||
System.out.println("Map: " + mName+ " size = "+ map.size());
|
||||
for(Map.Entry<String,String> entry: map.entrySet()){
|
||||
System.out.format("%s -> %s %n", entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
//Important!!!
|
||||
//Eviction may happen before specified time - when Hazelcast nodes are stopped/restarted/removed/added
|
||||
// (if the entry lives in the affected Hazelcast node)
|
||||
//Also, eviction events may be duplicated during cluster re-partitioning
|
||||
//
|
||||
//Cannot rely on eviction events for maintaining up-to-date data structures
|
||||
//
|
||||
|
||||
//testing lease acquisition and extension
|
||||
System.out.println("Testing Lease acquisition and renewal");
|
||||
for(int i = 0; i<10; i++){
|
||||
System.out.println("Waiting 10...");
|
||||
Thread.sleep(TimeUnit.SECONDS.toMillis(10L));
|
||||
|
||||
for(String mName: mapNames){
|
||||
IMap<String,String> map = client.getMap( mName );
|
||||
map.put("t10_lease", "v_"+System.currentTimeMillis(), 15, TimeUnit.SECONDS);
|
||||
System.out.println("Map: " + mName+ " size = "+ map.size());
|
||||
for(Map.Entry<String,String> entry: map.entrySet()){
|
||||
System.out.format("%s -> %s %n", entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
System.out.println("Stopped lease renewal");
|
||||
|
||||
System.out.println("Waiting 20...");
|
||||
Thread.sleep(TimeUnit.SECONDS.toMillis(20L));
|
||||
|
||||
for(String mName: mapNames){
|
||||
Map<String,String> map = client.getMap( mName );
|
||||
System.out.println("Map: " + mName+ " size = "+ map.size());
|
||||
for(Map.Entry<String,String> entry: map.entrySet()){
|
||||
System.out.format("%s -> %s %n", entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
System.exit(0);
|
||||
|
||||
}
|
||||
|
||||
|
||||
public static void main_3(String[] args) throws InterruptedException, ExecutionException {
|
||||
ClientConfig clientConfig = new ClientConfig();
|
||||
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
|
||||
clientConfig.getNetworkConfig().addAddress(addr);
|
||||
// clientConfig.getNetworkConfig().addAddress("127.0.0.1:5901");
|
||||
// clientConfig.getNetworkConfig().addAddress("127.0.0.1:5902");
|
||||
|
||||
//see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
|
||||
// here we're using "dumb" client that connects only to a single node of the cluster
|
||||
clientConfig.getNetworkConfig().setSmartRouting(false);
|
||||
clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
|
||||
|
||||
//HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
|
||||
HazelcastInstance client = new ReConnectingHazelcastClient(clientConfig, 20);
|
||||
|
||||
ThreadPoolExecutor executor = new ThreadPoolExecutor(10, Integer.getInteger("maxTestThreads",10), 10, TimeUnit.SECONDS, new ArrayBlockingQueue<>(1000));
|
||||
|
||||
long start = System.currentTimeMillis();
|
||||
long end;
|
||||
|
||||
if(Boolean.getBoolean("populateMapBeforeTest")){
|
||||
List<Future> futures = new ArrayList<>(1000);
|
||||
|
||||
//populate map with entries
|
||||
for(int i=0; i<1000000; i++){
|
||||
final int fI = i;
|
||||
// for(int retries=0;retries<100000; retries++){
|
||||
// try{
|
||||
// Future f = executor.submit(new Runnable() {
|
||||
// @Override
|
||||
// public void run() {
|
||||
// map.put("t_"+fI, ""+fI);
|
||||
// }
|
||||
// });
|
||||
// futures.add(f);
|
||||
// break;
|
||||
// }catch(RejectedExecutionException e){
|
||||
// Thread.sleep(100);
|
||||
// continue;
|
||||
// }
|
||||
// }
|
||||
|
||||
futures.add(client.getMap("testMap").putAsync("t_" + Integer.toString(fI), Integer.toString(fI)));
|
||||
|
||||
//wait for a batch of futures to complete
|
||||
if(futures.size()>=990){
|
||||
for(Future f: futures){
|
||||
f.get();
|
||||
}
|
||||
|
||||
futures.clear();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for(Future f: futures){
|
||||
f.get();
|
||||
}
|
||||
|
||||
end = System.currentTimeMillis();
|
||||
|
||||
System.out.println("Map: size = "+ client.getMap( "testMap" ).size() + " took "+(end - start)+" ms to populate");
|
||||
}
|
||||
|
||||
//measure time it takes to iterate through the whole dataset
|
||||
start = System.currentTimeMillis();
|
||||
|
||||
//iterate through all map entries
|
||||
int evenCount = 0;
|
||||
for(IMap.Entry<?,?> entry: client.getMap( "testMap" ).entrySet()){
|
||||
//map.tryLock(key, time, timeunit, leaseTime, leaseTimeunit)
|
||||
if(Integer.parseInt((String)entry.getValue())%2==0){
|
||||
evenCount++;
|
||||
}
|
||||
}
|
||||
|
||||
end = System.currentTimeMillis();
|
||||
|
||||
System.out.println("Map: size = "+ client.getMap( "testMap" ).size() + " took "+(end - start)+" ms to iterate through all entries. Found "+ evenCount + " even values");
|
||||
|
||||
if(Boolean.getBoolean("iterateWithPredicateTest")){
|
||||
//iterate through all map entries with predicate - in all members in parallel
|
||||
evenCount = 0;
|
||||
Predicate<String, String> predicate = new SamplePredicate();
|
||||
|
||||
for(Map.Entry<?, ?> entry: client.getMap( "testMap" ).entrySet(predicate )){
|
||||
evenCount++;
|
||||
}
|
||||
|
||||
end = System.currentTimeMillis();
|
||||
|
||||
System.out.println("Map: size = "+ client.getMap( "testMap" ).size() + " took "+(end - start)+" ms to iterate through all entries with predicate (in parallel). Found "+ evenCount + " even values");
|
||||
}
|
||||
|
||||
if(Boolean.getBoolean("deleteMapAfterTest")){
|
||||
client.getMap( "testMap" ).destroy();
|
||||
}
|
||||
|
||||
System.exit(0);
|
||||
|
||||
}
|
||||
|
||||
public static void main_4(String[] args) {
|
||||
|
||||
if(args.length!=2){
|
||||
System.out.println("Usage: program serviceName objectName");
|
||||
System.out.println("Where serviceName is one of: ");
|
||||
String[] services = {"hz:impl:queueService", "hz:impl:mapService", "hz:impl:atomicLongService", "or any SERVICE_NAME of descentants of com.hazelcast.spi.ManagedService" };
|
||||
for(String s: services){
|
||||
System.out.println(s);
|
||||
}
|
||||
|
||||
System.exit(1);
|
||||
}
|
||||
|
||||
|
||||
ClientConfig clientConfig = new ClientConfig();
|
||||
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
|
||||
clientConfig.getNetworkConfig().addAddress(addr);
|
||||
|
||||
//see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
|
||||
// here we're using "dumb" client that connects only to a single node of the cluster
|
||||
clientConfig.getNetworkConfig().setSmartRouting(false);
|
||||
clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
|
||||
|
||||
HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
|
||||
|
||||
String serviceName = args[0];
|
||||
String objectName = args[1];
|
||||
|
||||
System.out.format("Removing object %s:%s %n", serviceName, objectName);
|
||||
DistributedObject obj = client.getDistributedObject(serviceName, objectName);
|
||||
obj.destroy();
|
||||
|
||||
if("hz:impl:queueService".equals(serviceName) && objectName.startsWith("re-q-Cu_")){
|
||||
client.getMap("rule-agent-q-assignments-map").remove(objectName);
|
||||
client.getMap("unassigned-re-queues-map").remove(objectName);
|
||||
client.getMap("agent-queue-initial-reserved-capacity-map").remove(objectName);
|
||||
}
|
||||
|
||||
System.out.println("done.");
|
||||
|
||||
System.exit(0);
|
||||
}
|
||||
|
||||
public static void main_5(String[] args) {
|
||||
|
||||
ClientConfig clientConfig = new ClientConfig();
|
||||
        clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
        clientConfig.getNetworkConfig().addAddress(addr);

        //see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
        // here we're using "dumb" client that connects only to a single node of the cluster
        clientConfig.getNetworkConfig().setSmartRouting(false);
        clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);

        HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);

        String testQName = "re-q-Cu_1";
        IMap<String,String> qAssignmentsMap = client.getMap("rule-agent-q-assignments-map");
        //qAssignmentsMap.put(testQName, ""+System.currentTimeMillis());

        if(qAssignmentsMap.tryLock(testQName)){
            if(qAssignmentsMap.get(testQName)==null){
                System.out.println("Entry does not exist");
            } else {
                System.out.println("Entry exists : "+ qAssignmentsMap.get(testQName));
            }
            qAssignmentsMap.unlock(testQName);
        }

        qAssignmentsMap.delete(testQName);

        IQueue<byte[]> queue = client.getQueue(testQName);
        System.out.println("Entry exists : '"+ qAssignmentsMap.get(testQName)+"'");
        queue.clear();
        System.out.println("Entry exists : '"+ qAssignmentsMap.get(testQName)+"'");
        System.out.println("Queue cleared. Size = "+ queue.size());
        queue.destroy();
        System.out.println("Entry exists : "+ qAssignmentsMap.get(testQName));
        System.out.println("Queue destroyed. Size = "+ queue.size());

        System.exit(0);
    }

    public static void main_6(String[] args) {

        ClientConfig clientConfig = new ClientConfig();
        clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
        clientConfig.getNetworkConfig().addAddress(addr);

        //see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
        // here we're using "dumb" client that connects only to a single node of the cluster
        clientConfig.getNetworkConfig().setSmartRouting(false);
        clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);

        HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);

        String testQName = "re-q-Cu_1";
        IQueue<byte[]> agentQueue = client.getQueue(testQName);

        while(true){
            byte[] eventBytes = ("evt-"+System.currentTimeMillis()).getBytes();
            if(!agentQueue.offer(eventBytes)){
                //agentQueue is full and cannot take any more events.
                //we will drain it of old events and insert our new event.
                agentQueue.clear();
                System.err.println("Cleared queue "+ testQName);

                if(!agentQueue.offer(eventBytes)){
                    System.err.println("Cannot enqueue event " + testQName);
                }
            }

            if(agentQueue.size()%1000 == 0){
                System.out.println("Enqueued 1000 events, queue size is "+ agentQueue.size());
            }
        }

        //System.exit(0);
    }


    public static void main_7(String[] args) {

        ClientConfig clientConfig = new ClientConfig();
        clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
        clientConfig.getNetworkConfig().addAddress(addr);

        //see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
        // here we're using "dumb" client that connects only to a single node of the cluster
        clientConfig.getNetworkConfig().setSmartRouting(false);
        clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);

        HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);

        String testQName = "re-q-Cu_1";
        IQueue<byte[]> agentQueue = client.getQueue(testQName);

        while(true){

            List<byte[]> batchBytes = new ArrayList<>();
            for(int i=0; i<20; i++){
                batchBytes.add(("evt-"+System.currentTimeMillis()).getBytes());
            }

            boolean addedSuccessfully = false;
            try{
                addedSuccessfully = agentQueue.addAll(batchBytes);
            }catch(IllegalStateException e){
                //do nothing
            }

            if(!addedSuccessfully){
                //agentQueue is full and cannot take any more events.
                //we will drain it of old events and insert our new events.
                agentQueue.clear();
                System.err.println("Cleared queue "+ testQName);

                //try again the same operation
                try{
                    addedSuccessfully = agentQueue.addAll(batchBytes);
                }catch(IllegalStateException e1){
                    //do nothing
                }

                if(!addedSuccessfully) {
                    System.err.println("Cannot enqueue event " + testQName);
                }

            }

            if(agentQueue.size()%1000 == 0){
                System.out.println("Enqueued 1000 events, queue size is "+ agentQueue.size());
            }

        }

        //System.exit(0);
    }


    public static void main(String[] args) {

        ClientConfig clientConfig = new ClientConfig();
        clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
        clientConfig.getNetworkConfig().addAddress(addr);

        //see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
        // here we're using "dumb" client that connects only to a single node of the cluster
        clientConfig.getNetworkConfig().setSmartRouting(false);
        clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);

        HazelcastInstance hazelcastClient = HazelcastClient.newHazelcastClient(clientConfig);

        DistributedObjectListener distributedObjectListener = new DistributedObjectListener() {

            @Override
            public void distributedObjectDestroyed(DistributedObjectEvent event) {
                System.out.println("Object destroyed " + event.getServiceName() +" : " + event.getObjectName());
                String serviceName = event.getServiceName();
                String name = (String) event.getObjectName();
                for(DistributedObject distObj: hazelcastClient.getDistributedObjects()){
                    if(distObj.getServiceName().equals(serviceName) && distObj.getName().equals(name)){
                        distObj.destroy();
                    }
                }
            }

            @Override
            public void distributedObjectCreated(DistributedObjectEvent event) {
                System.out.println("Object created " + event.getServiceName() +" : " + event.getObjectName());
            }
        };
        hazelcastClient.addDistributedObjectListener(distributedObjectListener);


        System.err.println("*** initial state ***");

        for(DistributedObject distributedObject: hazelcastClient.getDistributedObjects()){
            if(distributedObject.getServiceName().equals(QueueService.SERVICE_NAME)
                    && distributedObject.getName().startsWith("re-q-")
                    ){
                System.out.println(distributedObject.getName());
            }
        }

        String queueName = "re-q-Cu_1";
        IQueue<byte[]> agentQueue = hazelcastClient.getQueue(queueName);
        agentQueue.size();

        System.err.println("*** after queue.size() ***");

        for(DistributedObject distributedObject: hazelcastClient.getDistributedObjects()){
            if(distributedObject.getServiceName().equals(QueueService.SERVICE_NAME)
                    && distributedObject.getName().startsWith("re-q-")
                    ){
                System.out.println(distributedObject.getName());
            }
        }

        agentQueue.destroy();
        System.err.println("*** after queue.destroy() ***");

        for(DistributedObject distributedObject: hazelcastClient.getDistributedObjects()){
            if(distributedObject.getServiceName().equals(QueueService.SERVICE_NAME)
                    && distributedObject.getName().startsWith("re-q-")
                    ){
                System.out.println(distributedObject.getName());
            }
        }

        System.err.println("*** done ***");

        // System.exit(0);
    }

    public static void main_9(String[] args) throws InterruptedException {

        ClientConfig clientConfig = new ClientConfig();
        clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
        clientConfig.getNetworkConfig().addAddress(addr);

        //see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
        // here we're using "dumb" client that connects only to a single node of the cluster
        clientConfig.getNetworkConfig().setSmartRouting(false);
        clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);

        HazelcastInstance hazelcastClient = HazelcastClient.newHazelcastClient(clientConfig);
        DistributedObjectListener distributedObjectListener = new DistributedObjectListener() {

            @Override
            public void distributedObjectDestroyed(DistributedObjectEvent event) {
                System.out.println("Object destroyed " + event.getServiceName() +" : " + event.getObjectName());
                String serviceName = event.getServiceName();
                String name = (String) event.getObjectName();
                for(DistributedObject distObj: hazelcastClient.getDistributedObjects()){
                    if(distObj.getServiceName().equals(serviceName) && distObj.getName().equals(name)){
                        distObj.destroy();
                    }
                }
            }

            @Override
            public void distributedObjectCreated(DistributedObjectEvent event) {
                System.out.println("Object created " + event.getServiceName() +" : " + event.getObjectName());
            }
        };
        hazelcastClient.addDistributedObjectListener(distributedObjectListener);

        while(true){
            Thread.sleep(5000);
            System.err.println("*** -------------------------------- ***");

            for(DistributedObject distributedObject: hazelcastClient.getDistributedObjects()){
                if(distributedObject.getServiceName().equals(QueueService.SERVICE_NAME)
                        && distributedObject.getName().startsWith("re-q-")
                        ){
                    System.err.println(distributedObject.getName());
                }
            }
            System.err.println("*** =============================== ***");

        }

        //System.exit(0);
    }
}
@@ -0,0 +1,57 @@
package com.telecominfraproject.wlan.hazelcast.client.clu;

import java.util.Map.Entry;
import java.util.Set;

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.spi.properties.GroupProperty;
import com.telecominfraproject.wlan.hazelcast.common.HazelcastObjectsConfiguration;

public class GetValues
{

    private static final String clusterName = System.getProperty("wc.hazelcast.clusterName", "wc-dev");
    private static final String password = System.getProperty("wc.hazelcast.clusterPassword", "wc-dev-pass");
    private static final String addr = System.getProperty("wc.hazelcast.clusterAddr", "127.0.0.1:5701");

    public static void main(String[] args) throws Exception {

//        if(args.length==1){
//            System.out.println("Usage: program hashKey");
//            System.exit(1);
//        }

        ClientConfig clientConfig = new ClientConfig();
        clientConfig.setProperty(GroupProperty.LOGGING_TYPE.getName(), "slf4j");
        clientConfig.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");

        clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
        clientConfig.getNetworkConfig().addAddress(addr);

        HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
        HazelcastObjectsConfiguration hazelcastObjectsConfiguration = HazelcastObjectsConfiguration.createOutsideOfSpringApp();

        IMap<Integer, Set<Long>> hashValues = client.getMap(hazelcastObjectsConfiguration.getHdsDirectoryCustomerEquipmentMapName());

        if(hashValues != null)
        {
            for(Entry<Integer, Set<Long>> entry : hashValues.entrySet())
            {
                System.out.println("CustomerId: " + entry.getKey());
                System.out.println("EquipmentIds: " + entry.getValue());
            }
        }
        else
        {
            System.out.println("No values found.");
        }

        System.out.println("done.");

        System.exit(0);
    }

}
@@ -0,0 +1,52 @@
package com.telecominfraproject.wlan.hazelcast.client.clu;

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.spi.properties.GroupProperty;

public class SetProvisionedCapacity {

    private static final String clusterName = System.getProperty("wc.hazelcast.clusterName", "wc-dev");
    private static final String password = System.getProperty("wc.hazelcast.clusterPassword", "wc-dev-pass");
    private static final String addr = System.getProperty("wc.hazelcast.clusterAddr", "127.0.0.1:5900");

    public static void main(String[] args) throws Exception {

        if(args.length!=2){
            System.out.println("Usage: program re_queue_name provisioned_capacity");
            System.exit(1);
        }


        ClientConfig clientConfig = new ClientConfig();
        clientConfig.setProperty(GroupProperty.LOGGING_TYPE.getName(), "slf4j");
        clientConfig.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");

        clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
        clientConfig.getNetworkConfig().addAddress(addr);

        //see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
        // here we're using "dumb" client that connects only to a single node of the cluster
        clientConfig.getNetworkConfig().setSmartRouting(false);
        clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);

        HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);

        String qName = args[0];
        float newProvisionedCapacity = Float.parseFloat(args[1]);

        System.out.format("Setting provisioned capacity for '%s' to %s %n", args[0], args[1]);
        IMap<String,Float> initialReservedCapacityMap = client.getMap("agent-queue-initial-reserved-capacity-map");
        Float oldValue = initialReservedCapacityMap.put(qName, newProvisionedCapacity);
        if(oldValue!=null){
            System.out.println("Replaced old value: " + oldValue);
        }

        System.out.println("done.");

        System.exit(0);
    }

}
31
base-hierarchical-datastore/pom.xml
Normal file
@@ -0,0 +1,31 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>com.telecominfraproject.wlan</groupId>
        <artifactId>tip-wlan-cloud-root-pom</artifactId>
        <version>0.0.1-SNAPSHOT</version>
        <relativePath>../../tip-wlan-cloud-root</relativePath>
    </parent>
    <artifactId>base-hierarchical-datastore</artifactId>
    <name>base-hierarchical-datastore</name>
    <description>Common classes used by the hierarchical data sources.</description>
    <dependencies>
        <dependency>
            <groupId>com.telecominfraproject.wlan</groupId>
            <artifactId>base-models</artifactId>
            <version>${tip-wlan-cloud.release.version}</version>
        </dependency>
        <dependency>
            <groupId>com.telecominfraproject.wlan</groupId>
            <artifactId>base-hazelcast-client</artifactId>
            <version>${tip-wlan-cloud.release.version}</version>
        </dependency>
        <dependency>
            <groupId>com.telecominfraproject.wlan</groupId>
            <artifactId>cloud-metrics</artifactId>
            <version>${tip-wlan-cloud.release.version}</version>
        </dependency>

    </dependencies>

</project>
@@ -0,0 +1,25 @@
package com.telecominfraproject.wlan.hierarchical.datastore;

import java.util.List;
import java.util.Set;

import com.telecominfraproject.wlan.core.model.filter.EntryFilter;
import com.telecominfraproject.wlan.hierarchical.datastore.index.DirectoryIndex;

/**
 * This interface is used by HDS internally to apply a piece of logic to every data file traversed by the method HierarchicalDatastore.processDataFiles().
 * @see com.telecominfraproject.wlan.hierarchical.datastore.HierarchicalDatastore.processDataFiles(int, long, long, long, EntryFilter<T>, String, Set<String>, DataFileOperation<T>)
 * @author dtop
 *
 * @param <T> - Class of the entry records stored in the data files
 */
public interface DataFileOperation<T>{
    /**
     * @param entryFilter
     * @param indexName
     * @param indexedValues
     * @param dataFileNames - list of data file names to be processed
     * @param hourlyIdx - this parameter can be NULL !!! - means no hourly index is available
     */
    void processFiles(EntryFilter<T> entryFilter, String indexName, Set<String> indexedValues, List<String> dataFileNames, DirectoryIndex hourlyIdx);
}
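For reference, a minimal DataFileOperation implementation might look like the sketch below. It is not part of this commit: the class name LoggingDataFileOperation is hypothetical, and instead of parsing the data files it only reports how many records the hourly index claims match the requested values, falling back to a "would scan everything" message when no hourly index is available.

package com.telecominfraproject.wlan.hierarchical.datastore;

import java.util.List;
import java.util.Set;

import com.telecominfraproject.wlan.core.model.filter.EntryFilter;
import com.telecominfraproject.wlan.hierarchical.datastore.index.DirectoryIndex;
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndex;

public class LoggingDataFileOperation<T> implements DataFileOperation<T> {

    @Override
    public void processFiles(EntryFilter<T> entryFilter, String indexName, Set<String> indexedValues,
            List<String> dataFileNames, DirectoryIndex hourlyIdx) {
        for (String dataFileName : dataFileNames) {
            if (hourlyIdx == null) {
                // no hourly index - the full data file would have to be parsed
                System.out.println("no hourly index, would scan " + dataFileName);
                continue;
            }
            RecordIndex recordIndex = hourlyIdx.getDataFileNameToRecordIndexMap().get(dataFileName);
            int matchingRecords = 0;
            if (recordIndex != null && recordIndex.getCounts() != null) {
                for (String value : indexedValues) {
                    matchingRecords += recordIndex.getCounts().getCountForValue(value);
                }
            }
            System.out.println(dataFileName + " : " + matchingRecords + " records indexed under " + indexName);
        }
    }
}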
File diff suppressed because it is too large
@@ -0,0 +1,181 @@
package com.telecominfraproject.wlan.hierarchical.datastore;

import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.Formatter;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This class provides an iterator that returns full names of the hourly index files for a given HDS, index, customer, equipment and time range.
 * We do not pre-build the complete list of files because it may be too long, depending on the time range.
 *
 * @author dtop
 */
public class HourlyIndexFileNames implements Iterable<String> {

    private static final Logger LOG = LoggerFactory.getLogger(HourlyIndexFileNames.class);

    public final static String hourlyIndexFileNamePrefix = "hrIdx_";

    private final int customerId;
    private final long equipmentId;
    private final long fromTime;
    private final long toTime;
    private final String indexName;
    private final String dsPrefix;
    private final String fileNamePrefix;
    private final int numberOfMinutesPerFile;
    private final int size;
    private final int hourIncrement;


    private boolean hasNextValue = true;
    private String nextValue;
    private Calendar fromCalendar;
    private Calendar toCalendar;

    public HourlyIndexFileNames(int customerId, long equipmentId, long fromTime, long toTime, String indexName, String dsPrefix, String fileNamePrefix, int numberOfMinutesPerFile){

        this.numberOfMinutesPerFile = numberOfMinutesPerFile;

        if(numberOfMinutesPerFile<=60){
            hourIncrement = 1;
        } else {
            // if the number of minutes per file is greater than one hour, not every hourly directory will contain the data files
            // we'll skip those hourly directories that do not have the data we're looking for
            if(numberOfMinutesPerFile%60!=0){
                throw new IllegalArgumentException("If number of minutes per file is greater than 60, it must be a multiple of 60");
            }
            hourIncrement = numberOfMinutesPerFile/60;
        }

        if(fromTime>toTime){
            //invalid date range, return empty list
            LOG.debug("HourlyIndexFileNames for customer {} equipment {} from {} to {} built 0 files. invalid time range.", customerId, equipmentId, fromTime, toTime);
            this.hasNextValue = false;
            this.size = 0;
        } else {

            //if toTime is in the future - set it back to now
            long currentTime = System.currentTimeMillis();
            if(toTime>currentTime){
                toTime = currentTime;
            }

            //NOT needed, because each data file contains entries from the beginning of a numberOfMinutesPerFile interval
            //keeping it here so we do not forget about it and do not over-think this logic
            //    //adjust fromTime so it catches a file that may be on a boundary of an hour
            //    fromTime = fromTime - 60000L*this.numberOfMinutesPerFile;

            //adjust fromTime so it is a multiple of 1 hour
            fromTime = fromTime - fromTime%(TimeUnit.HOURS.toMillis(1) * getHourIncrement());

            this.size = (int)((toTime - fromTime)/(TimeUnit.HOURS.toMillis(1) * getHourIncrement())) + 1;

            fromCalendar = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
            fromCalendar.setTime(new Date(fromTime));

            toCalendar = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
            toCalendar.setTime(new Date(toTime));

        }

        this.customerId = customerId;
        this.equipmentId = equipmentId;
        this.fromTime = fromTime;
        this.toTime = toTime;
        this.indexName = indexName;
        this.dsPrefix = dsPrefix;
        this.fileNamePrefix = fileNamePrefix;
    }

    /**
     * @return number of files that will be produced if the caller iterates through all of them
     */
    public int getSize() {
        return size;
    }

    public int getHourIncrement() {
        return hourIncrement;
    }

    private String getFileName(int year, int month, int day, int hour){
        StringBuilder sb = new StringBuilder(1024);
        Formatter formatter = new Formatter(sb, null);
        formatter.format("%s/%d/%d/%4d/%02d/%02d/%02d/%s%s_%s_%d_%d_%4d_%02d_%02d_%02d.zip",
                dsPrefix, customerId, equipmentId, year, month, day, hour,
                hourlyIndexFileNamePrefix, indexName, fileNamePrefix,
                customerId, equipmentId, year, month, day, hour
                );
        formatter.close();

        return sb.toString();
    }

    private void advanceToNextValue() {
        // generate list of files based on supplied criteria
        if (hasNextValue) {

            int year = fromCalendar.get(Calendar.YEAR);
            int month = fromCalendar.get(Calendar.MONTH) + 1;
            int day = fromCalendar.get(Calendar.DAY_OF_MONTH);
            int hour = fromCalendar.get(Calendar.HOUR_OF_DAY);

            nextValue = getFileName(year, month, day, hour);

            // advance time for the next iteration
            fromCalendar.add(Calendar.HOUR_OF_DAY, getHourIncrement());
            hasNextValue = fromCalendar.before(toCalendar) || fromCalendar.equals(toCalendar);
        } else {
            nextValue = null;
        }
    }

    @Override
    public Iterator<String> iterator() {
        return new Iterator<String>() {

            @Override
            public boolean hasNext() {
                return hasNextValue;
            }

            @Override
            public String next() {
                if (!hasNextValue) {
                    throw new NoSuchElementException("No more elements");
                }
                advanceToNextValue();
                return nextValue;
            }

        };
    }

    public static void main(String[] args) {
        long fromTime = System.currentTimeMillis()
                - TimeUnit.MINUTES.toMillis(30)
                - TimeUnit.HOURS.toMillis(33);
        long toTime = System.currentTimeMillis();

        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS XXX");
        sdf.setTimeZone(TimeZone.getTimeZone("UTC"));
        System.out.println("From: "+ sdf.format(new Date(fromTime))+" To: "+sdf.format(new Date(toTime)));

        HourlyIndexFileNames hifn = new HourlyIndexFileNames(1, 2, fromTime, toTime, "metricDataType", "dev1", "sm_x24h", 24*60);

        for(String fn: hifn){
            System.out.println(fn);
        }

        System.out.println("size: "+ hifn.getSize());
    }
}
@@ -0,0 +1,39 @@
/**
 *
 */
package com.telecominfraproject.wlan.hierarchical.datastore;

import java.io.IOException;
import java.io.InputStream;
import java.util.zip.ZipInputStream;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Implementing <a href=
 * "https://github.com/aws/aws-sdk-java/issues/1111">https://github.com/aws/aws-sdk-java/issues/1111</a>
 * in response to NAAS-9238
 *
 * @author ekeddy
 *
 */
public class SelfDrainingZipInputStream extends ZipInputStream {
    private static final Logger LOG = LoggerFactory.getLogger(SelfDrainingZipInputStream.class);

    public SelfDrainingZipInputStream(InputStream in) {
        super(in);
    }

    @Override
    public void close() throws IOException {
        // Drain before closing to keep the S3 client happy
        while (getNextEntry() != null) {
            // skip any remaining zip entries
        }
        LOG.debug("Draining input stream");
        // drain the underlying InputStream
        while (in.read() >= 0) {
            // discard remaining bytes
        }
        super.close();
    }
}
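A usage sketch, illustrative only and not part of this commit: wrap a stream before reading zip entries from it, so that close() skips whatever entries and bytes the reader did not consume. The local file source and the class name SelfDrainingZipExample are stand-ins; in production the wrapped stream would typically be an S3 object stream, which is what motivated the class.

import java.io.FileInputStream;
import java.io.IOException;
import java.util.zip.ZipEntry;

import com.telecominfraproject.wlan.hierarchical.datastore.SelfDrainingZipInputStream;

public class SelfDrainingZipExample {
    public static void main(String[] args) throws IOException {
        // hypothetical local file path supplied on the command line
        try (SelfDrainingZipInputStream zis = new SelfDrainingZipInputStream(new FileInputStream(args[0]))) {
            ZipEntry entry = zis.getNextEntry();
            if (entry != null) {
                System.out.println("first entry: " + entry.getName());
            }
            // close() will drain the remaining entries and the underlying stream
        }
    }
}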
@@ -0,0 +1,110 @@
package com.telecominfraproject.wlan.hierarchical.datastore.index;

import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;

/**
 * This class represents a directory index that is built from record index files.
 * @author dtop
 *
 */
public class DirectoryIndex extends BaseJsonModel{

    private static final long serialVersionUID = -2899289802601058048L;

    private String name;

    private Map<String, RecordIndex> dataFileNameToRecordIndexMap = new HashMap<>();

    public Map<String, RecordIndex> getDataFileNameToRecordIndexMap() {
        return dataFileNameToRecordIndexMap;
    }
    public void setDataFileNameToRecordIndexMap(Map<String, RecordIndex> dataFileNameToRecordIndexMap) {
        this.dataFileNameToRecordIndexMap = dataFileNameToRecordIndexMap;
    }

    public String getName() {
        return name;
    }
    public void setName(String name) {
        this.name = name;
    }

    private Map<String, Long> fileNameTimeCache = new ConcurrentHashMap<>();

    /**
     * @param fromTime
     * @param toTime
     * @return - subset of files in this index that contain data between fromTime and toTime
     */
    @JsonIgnore
    public Set<String> getDataFileNames(long fromTime, long toTime, int minutesPerDataFile) {
        Set<String> ret = new HashSet<>();
        Long ts;

        //to catch the first file we need to adjust fromTime
        fromTime = fromTime - fromTime%TimeUnit.MINUTES.toMillis(minutesPerDataFile);

        for(String dfn: dataFileNameToRecordIndexMap.keySet()){
            ts = fileNameTimeCache.get(dfn);
            if(ts == null){
                ts = extractTimeFromTheDataFileName(dfn);
                fileNameTimeCache.put(dfn, ts);
            }

            if(ts>=fromTime && ts<=toTime){
                ret.add(dfn);
            }
        }
        return ret;
    }

    /**
     * @param dataFileName
     * @return timestamp in ms, extracted from the file name, or -1 if the timestamp cannot be extracted
     */
    public static long extractTimeFromTheDataFileName(String dataFileName){
        //data file name is formatted as follows:
        //    Formatter formatter = new Formatter(sb, null);
        //    formatter.format("%s/%d/%d/%4d/%02d/%02d/%02d/%s_%d_%d_%4d_%02d_%02d_%02d_%02d_%d.zip",
        //        dsPrefix, customerId, equipmentId, year, month, day, hour,
        //        fileNamePrefix, customerId, equipmentId, year, month, day, hour, minute, createdTs
        //        );
        try{
            String[] parts = dataFileName.split("_");
            int len = parts.length;
            // we are interested in the year, month, day, hour, minute parts
            int year = Integer.parseInt(parts[len-6]);
            int month = Integer.parseInt(parts[len-5]) - 1;
            int day = Integer.parseInt(parts[len-4]);
            int hour = Integer.parseInt(parts[len-3]);
            int minute = Integer.parseInt(parts[len-2]);

            Calendar c = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
            c.clear();
            c.set(year, month, day, hour, minute, 0);

            return c.getTimeInMillis();
        } catch (Exception e){
            return -1;
        }
    }

    public static void main(String[] args) {
        long ts = DirectoryIndex.extractTimeFromTheDataFileName("/blah/blah_blah/ree_13_834_2016_12_03_04_26_1480739175816.zip");
        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS XXX");
        System.out.println("Time: "+ sdf.format(new Date(ts)));

    }
}
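A short illustration of how a DirectoryIndex narrows the set of data files for a time range. This is not part of the commit; the file names and the 60-minute file granularity below are made up for the example, and the RecordIndex payloads are left empty for brevity.

import java.util.Set;
import java.util.concurrent.TimeUnit;

import com.telecominfraproject.wlan.hierarchical.datastore.index.DirectoryIndex;
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndex;

public class DirectoryIndexExample {
    public static void main(String[] args) {
        DirectoryIndex idx = new DirectoryIndex();
        idx.setName("metricDataType");
        // one entry per data file in the hourly directory
        idx.getDataFileNameToRecordIndexMap().put("dev1/13/834/2016/12/03/04/sm_13_834_2016_12_03_04_00_1480737600000.zip", new RecordIndex());
        idx.getDataFileNameToRecordIndexMap().put("dev1/13/834/2016/12/03/05/sm_13_834_2016_12_03_05_00_1480741200000.zip", new RecordIndex());

        long fromTime = DirectoryIndex.extractTimeFromTheDataFileName("dev1/13/834/2016/12/03/05/sm_13_834_2016_12_03_05_00_1480741200000.zip");
        long toTime = fromTime + TimeUnit.MINUTES.toMillis(59);

        // only the file whose name-encoded timestamp falls inside [fromTime, toTime] is returned
        Set<String> names = idx.getDataFileNames(fromTime, toTime, 60);
        System.out.println(names);
    }
}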
@@ -0,0 +1,32 @@
package com.telecominfraproject.wlan.hierarchical.datastore.index;

import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;

public class RecordIndex extends BaseJsonModel {
    private static final long serialVersionUID = 4535638079969100441L;

    private RecordIndexCounts counts;
    private RecordIndexPositions positions;

    public RecordIndex(){}

    public RecordIndex(RecordIndexCounts counts, RecordIndexPositions positions){
        this.counts = counts;
        this.positions = positions;
    }

    public RecordIndexCounts getCounts() {
        return counts;
    }
    public void setCounts(RecordIndexCounts counts) {
        this.counts = counts;
    }
    public RecordIndexPositions getPositions() {
        return positions;
    }
    public void setPositions(RecordIndexPositions positions) {
        this.positions = positions;
    }

}
@@ -0,0 +1,62 @@
package com.telecominfraproject.wlan.hierarchical.datastore.index;

import java.util.HashMap;
import java.util.Map;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;

/**
 * Classes RecordIndexCounts and RecordIndexPositions represent a record-level index in the hierarchical datastore.
 * The goal of this index is to reduce the number of records that need to be processed by the json parser when performing filtering operations.
 * This index corresponds one-to-one to a data file, and it is usually written at the same time as the data file.
 * During data filtering operations the index should be taken as a hint - if it is missing, then the full data file will be processed.
 * It should be possible to introduce new indexes after the fact - old data files can be scanned and new index files can be created.
 * Indexes are stored in the same directory as the data files they represent.
 * <br>
 * Index file name is structured as idx_[indexName]_[dataFileName] and it is not compressed.
 * Inside the index file archive there is one entry with a text file.
 * The first line in that text file contains the json object for RecordIndexCounts, the second line contains the json object for RecordIndexPositions
 *
 * @author dtop
 *
 */
public class RecordIndexCounts extends BaseJsonModel {

    private static final long serialVersionUID = 17672003429334228L;

    private String name;
    private int totalCount;
    private Map<String, Integer> perValueCounts = new HashMap<>();

    public int getTotalCount() {
        return totalCount;
    }
    public void setTotalCount(int totalCount) {
        this.totalCount = totalCount;
    }

    public String getName() {
        return name;
    }
    public void setName(String name) {
        this.name = name;
    }

    public Map<String, Integer> getPerValueCounts() {
        return perValueCounts;
    }
    public void setPerValueCounts(Map<String, Integer> perValueCounts) {
        this.perValueCounts = perValueCounts;
    }

    @JsonIgnore
    public int getCountForValue(String value){
        return perValueCounts.getOrDefault(value, 0);
    }

    public void incrementCountForValue(String value){
        totalCount++;
        perValueCounts.put(value, getCountForValue(value) + 1);
    }
}
@@ -0,0 +1,62 @@
package com.telecominfraproject.wlan.hierarchical.datastore.index;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;

/**
 * Classes RecordIndexCounts and RecordIndexPositions represent a record-level index in the hierarchical datastore.
 * The goal of this index is to reduce the number of records that need to be processed by the json parser when performing filtering operations.
 * This index corresponds one-to-one to a data file in HDS, and it is usually written at the same time as the data file.
 * During data filtering operations the index should be taken as a hint - if it is missing, then the full data file will be processed.
 * It should be possible to introduce new indexes after the fact - old data files can be scanned and new index files can be created.
 * Indexes are stored in the same directory as the data files they represent.
 * <br>
 * Index file name is structured as idx_[indexName]_[dataFileName] and it is not compressed.
 * Inside the index file archive there is one entry with a text file.
 * The first line in that text file contains the json object for RecordIndexCounts, the second line contains the json object for RecordIndexPositions
 *
 * @author dtop
 *
 */
public class RecordIndexPositions extends BaseJsonModel {

    private static final long serialVersionUID = 17672003429334228L;

    private String name;
    private Map<String, List<Integer>> perValuePositions = new HashMap<>();

    public String getName() {
        return name;
    }
    public void setName(String name) {
        this.name = name;
    }

    public Map<String, List<Integer>> getPerValuePositions() {
        return perValuePositions;
    }
    public void setPerValuePositions(Map<String, List<Integer>> perValuePositions) {
        this.perValuePositions = perValuePositions;
    }

    @JsonIgnore
    public List<Integer> getPositionsForValue(String value){
        return perValuePositions.getOrDefault(value, Collections.emptyList());
    }

    public void addPositionForValue(String value, int pos){
        List<Integer> positions = perValuePositions.get(value);
        if(positions==null){
            positions = new ArrayList<>();
            perValuePositions.put(value, positions);
        }

        positions.add(pos);
    }
}
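To make the counts/positions pairing concrete, here is a small sketch, illustrative only and not part of this commit, that builds both halves of a record index for one data file. The class name RecordIndexExample and the event-type values are made up; pretend they are the indexed values extracted from lines 0..3 of a data file.

import java.util.List;

import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndex;
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexCounts;
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexPositions;

public class RecordIndexExample {
    public static void main(String[] args) {
        RecordIndexCounts counts = new RecordIndexCounts();
        counts.setName("eventType");
        RecordIndexPositions positions = new RecordIndexPositions();
        positions.setName("eventType");

        // hypothetical indexed value for each line of the data file
        String[] valuePerLine = { "ClientConnect", "ClientDisconnect", "ClientConnect", "ClientConnect" };
        for (int lineNum = 0; lineNum < valuePerLine.length; lineNum++) {
            counts.incrementCountForValue(valuePerLine[lineNum]);
            positions.addPositionForValue(valuePerLine[lineNum], lineNum);
        }

        RecordIndex recordIndex = new RecordIndex(counts, positions);
        // prints 3 - three records match "ClientConnect"
        System.out.println(recordIndex.getCounts().getCountForValue("ClientConnect"));
        // prints [0, 2, 3] - the line positions of those records
        List<Integer> connectPositions = recordIndex.getPositions().getPositionsForValue("ClientConnect");
        System.out.println(connectPositions);
    }
}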
@@ -0,0 +1,14 @@
package com.telecominfraproject.wlan.hierarchical.datastore.index;

import java.util.Set;

import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;

public interface RecordIndexValueExtractor {
    /**
     *
     * @param model
     * @return Set of index values that are extracted from the supplied model. If no values are extracted, then the returned set will contain a single empty string "".
     */
    Set<String> extractValues(BaseJsonModel model);
}
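A minimal extractor sketch, illustrative only: it indexes records by their concrete model class name, which mirrors the commented-out payloadType extractor shown in RecordIndexRegistry further down. The class name ModelClassNameExtractor is hypothetical.

import java.util.Collections;
import java.util.Set;

import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexValueExtractor;

public class ModelClassNameExtractor implements RecordIndexValueExtractor {
    @Override
    public Set<String> extractValues(BaseJsonModel model) {
        if (model == null) {
            // contract: when nothing can be extracted, return a single empty string
            return Collections.singleton("");
        }
        return Collections.singleton(model.getClass().getSimpleName());
    }
}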
@@ -0,0 +1,509 @@
package com.telecominfraproject.wlan.hierarchical.datastore.index.aggregator;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.Formatter;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.StreamUtils;

import com.hazelcast.core.HazelcastInstance;
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
import com.telecominfraproject.wlan.hazelcast.HazelcastForUnitTest;
import com.telecominfraproject.wlan.hazelcast.common.HazelcastObjectsConfiguration;
import com.telecominfraproject.wlan.hierarchical.datastore.HourlyIndexFileNames;
import com.telecominfraproject.wlan.hierarchical.datastore.HierarchicalDatastore;
import com.telecominfraproject.wlan.hierarchical.datastore.index.DirectoryIndex;
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndex;
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexCounts;
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexPositions;
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexValueExtractor;
import com.telecominfraproject.wlan.hierarchical.datastore.index.registry.RecordIndexRegistry;
import com.telecominfraproject.wlan.server.exceptions.GenericErrorException;

/**
 * This class produces a single hourly directory index file for a given
 * <pre>
 * <dsRootDirName, customer, equipment, RecordIndexRegistry, year, month, day, hour>
 * </pre>
 * It performs the following steps:
 * <ol>
 * <li>compute the directory name from the given parameters
 * <li>get the list of all files in that directory
 * <li>get the content of all hourly index files in that directory
 * <li>check if the newest creation timestamp of the data files is older than the creation timestamp of the hourly index file
 * <li>if not (a data file was created after the hourly index file, or the hourly index file is missing) - then re-build and overwrite the hourly index file
 * <ol>
 * <li>for every data file that does not have all the record indexes - build the missing record index files and store them in hazelcast.
 * Make sure NOT to accumulate the content of the data files in memory.
 * <li>build the hourly index file by combining the content of all record index files in the directory
 * <li>store the hourly index file in the same directory in zipped format
 * </ol>
 * </ol>
 *
 * @author dtop
 *
 */
public class HourlyIndexAggregatorHazelcastScalable {

    private static final Logger LOG = LoggerFactory.getLogger(HourlyIndexAggregatorHazelcastScalable.class);

    private final String dsRootDirName;
    private final String dsPrefix;
    private final RecordIndexRegistry recordIndexRegistry;
    private final HazelcastInstance hazelcastClient;
    private final HazelcastObjectsConfiguration hazelcastObjectsConfiguration;

    public HourlyIndexAggregatorHazelcastScalable(String dsRootDirName, String dsPrefix, RecordIndexRegistry recordIndexRegistry,
            HazelcastInstance hazelcastClient, HazelcastObjectsConfiguration hazelcastObjectsConfiguration) {
        this.dsRootDirName = dsRootDirName;
        this.dsPrefix = dsPrefix;
        this.recordIndexRegistry = recordIndexRegistry;
        this.hazelcastClient = hazelcastClient;
        this.hazelcastObjectsConfiguration = hazelcastObjectsConfiguration;
    }

    /**
     * build hourly index for customer/equipment for all hours that fall within the supplied time range [fromTimeMs, toTimeMs]
     * @param customerId
     * @param equipmentId
     * @param fromTimeMs
     * @param toTimeMs
     */
    public void buildHourlyIndex(int customerId, long equipmentId, long fromTimeMs, long toTimeMs){

        LOG.debug("started buildHourlyIndex({}, {}, {}, {})", customerId, equipmentId, fromTimeMs, toTimeMs);

        //if toTime is in the future or too recent - cap it at 2 hours ago
        long currentTime = System.currentTimeMillis() - TimeUnit.HOURS.toMillis(2);

        if(toTimeMs>currentTime){
            toTimeMs = currentTime;
        }

        //adjust fromTime/toTime so they are on a boundary of a GMT hour
        fromTimeMs = fromTimeMs - fromTimeMs%(TimeUnit.HOURS.toMillis(1));
        toTimeMs = toTimeMs - toTimeMs%(TimeUnit.HOURS.toMillis(1));

        Calendar fromCalendar = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
        fromCalendar.setTime(new Date(fromTimeMs));

        Calendar toCalendar = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
        toCalendar.setTime(new Date(toTimeMs));

        //generate list of directories based on supplied criteria
        while(fromCalendar.before(toCalendar) || fromCalendar.equals(toCalendar)){

            final int year = fromCalendar.get(Calendar.YEAR);
            final int month = fromCalendar.get(Calendar.MONTH) + 1;
            final int day = fromCalendar.get(Calendar.DAY_OF_MONTH);
            final int hour = fromCalendar.get(Calendar.HOUR_OF_DAY);

            buildHourlyIndexForSingleHour(customerId, equipmentId, year, month, day, hour);

            //advance time to get the directory for the next hour
            fromCalendar.add(Calendar.HOUR_OF_DAY, 1);
        }

        LOG.debug("completed buildHourlyIndex({}, {}, {}, {})", customerId, equipmentId, fromTimeMs, toTimeMs);
    }

    public void buildHourlyIndexForSingleHour(int customerId, long equipmentId, int year, int month, int day, int hour){
        LOG.info("started buildHourlyIndexForSingleHour({}, {}, {}, {}, {}, {})", customerId, equipmentId, year, month, day, hour);

        //123wlan-datastore-us-east-1/dev1/13/834/2016/12/03/04/
        StringBuilder sb = new StringBuilder(1024);
        Formatter formatter = new Formatter(sb, null);
        formatter.format("%s/%d/%d/%4d/%02d/%02d/%02d/",
                dsPrefix, customerId, equipmentId, year, month, day, hour
                );
        formatter.close();

        String dirKey = sb.toString();

        StringBuilder sb1 = new StringBuilder(256);
        Formatter formatter1 = new Formatter(sb1, null);
        formatter1.format("_%d_%d_%4d_%02d_%02d_%02d",
                customerId, equipmentId, year, month, day, hour
                );
        formatter1.close();

        String hourlyIndexSuffix = sb1.toString();

        Map<String,Long> fileNameToLastModMap = HierarchicalDatastore.getFileNamesAndLastMods(dsRootDirName, dirKey);
        Set<String> dataFilesPrefixes = recordIndexRegistry.getAllFileNamePrefixes();
        Set<String> dataFilesPrefixesInUse = new HashSet<>();

        Map<String, Set<String>> dataFilePrefixToNamesOfDataFilesMap = new HashMap<>();
        Set<String> tmpNamesOfDataFiles;
        String shortFileName;

        for(String fName: fileNameToLastModMap.keySet()){
            //System.out.println(fName);
            shortFileName = fName.substring(fName.lastIndexOf('/')+1);
            for(String prefixName : dataFilesPrefixes){
                if(shortFileName.startsWith(prefixName)){
                    tmpNamesOfDataFiles = dataFilePrefixToNamesOfDataFilesMap.get(prefixName);
                    if(tmpNamesOfDataFiles==null){
                        tmpNamesOfDataFiles = new HashSet<>();
                        dataFilePrefixToNamesOfDataFilesMap.put(prefixName, tmpNamesOfDataFiles);
                    }
                    tmpNamesOfDataFiles.add(fName);
                    dataFilesPrefixesInUse.add(prefixName);
                    break;
                }
            }

        }

        //now process each dataFilePrefix separately - to make sure the hourly index built for it is up to date
        for(String fNamePrefix: dataFilesPrefixesInUse){
            buildHourlyIndexesForSingleFileNamePrefix(dirKey, fNamePrefix, hourlyIndexSuffix,
                    dataFilePrefixToNamesOfDataFilesMap.get(fNamePrefix), fileNameToLastModMap);
        }

        LOG.info("completed buildHourlyIndexForSingleHour({}, {}, {}, {}, {}, {})", customerId, equipmentId, year, month, day, hour);

    }

    /**
     * Convenience class that keeps track of a DirectoryIndex, its indexName, hourlyIdxFileName, hrIndexPrefix, and other metadata
     * @author dtop
     *
     */
    private static class InternalDirectoryIndex{
        String indexName;
        String hourlyIdxFileName;
        String hrIndexPrefix;
        DirectoryIndex hourlyIdx;
        long hourlyIdxFileLastmod = 0;
        boolean needsToBeStored = false;
    }


    /**
     * @param directory - what directory to process for the hourly index file
     * @param fNamePrefix - file name prefix for the data files to process
     * @param hourlyIndexSuffix - in the form "_%d_%d_%4d_%02d_%02d_%02d"
     * @param namesOfDataFiles - subset of all file names as read from the directory, all having fNamePrefix
     * @param fileNameToLastModMap - file names and their lastmod timestamps read from the directory
     */
    public void buildHourlyIndexesForSingleFileNamePrefix(String directory, String fNamePrefix, String hourlyIndexSuffix,
            Set<String> namesOfDataFiles,
            Map<String,Long> fileNameToLastModMap){

        LOG.debug("started buildHourlyIndexesForSingleFileNamePrefix({})", fNamePrefix);

        Set<String> indexNamesToProcess = recordIndexRegistry.getAllIndexesForFileNamePrefix(fNamePrefix);
        List<InternalDirectoryIndex> internalDirectoryIndexes = new ArrayList<>(indexNamesToProcess.size());

        //populate InternalDirectoryIndex-es for the current fileNamePrefix

        for(String indexName: indexNamesToProcess){
            InternalDirectoryIndex internalDirIdx = new InternalDirectoryIndex();
            internalDirectoryIndexes.add(internalDirIdx);
            internalDirIdx.indexName = indexName;

            //find the name of the hourly index file, if it is present
            internalDirIdx.hrIndexPrefix = HourlyIndexFileNames.hourlyIndexFileNamePrefix+indexName+"_"+fNamePrefix;
            for(String fName: fileNameToLastModMap.keySet()){
                if(fName.substring(fName.lastIndexOf('/')+1).startsWith(internalDirIdx.hrIndexPrefix)){
                    internalDirIdx.hourlyIdxFileName = fName;
                    break;
                }
            }

            //If the hourly idx file exists then we need to check if there are any existing index files
            // or data files that have lastMod after the hourly index was created -
            // if they are present, then the hourly index needs to be rebuilt.
            //We are covering the following cases in here:
            // 1. hourly index was initially created, but later a new data file
            //    appeared in the same directory (with or without its own index file)
            // 2. hourly index was initially created, later all record index files were
            //    deleted (because we only need the hourly index after it is built), and
            //    later a new data file appeared in the same directory (with or
            //    without its own index file)
            // In all of these cases
            // we need to get (or build) the record index file for the new data file,
            // and add it into the hourly index. And then save the hourly index with all the changes.

            if(internalDirIdx.hourlyIdxFileName!=null){
                //retrieve existing hourly index file
                LOG.debug("hourly index found : {}", internalDirIdx.hourlyIdxFileName);
                internalDirIdx.hourlyIdx = HierarchicalDatastore.getZippedModelFromFile(dsRootDirName, internalDirIdx.hourlyIdxFileName, DirectoryIndex.class);
                internalDirIdx.hourlyIdxFileLastmod = fileNameToLastModMap.get(internalDirIdx.hourlyIdxFileName);
            } else {
                internalDirIdx.hourlyIdxFileName = directory + internalDirIdx.hrIndexPrefix + hourlyIndexSuffix+".zip";
            }

            if(internalDirIdx.hourlyIdx == null){
                LOG.debug("hourly index NOT found : {}", internalDirIdx.hourlyIdxFileName);
                internalDirIdx.hourlyIdx = new DirectoryIndex();
                internalDirIdx.hourlyIdx.setName(indexName);
                internalDirIdx.needsToBeStored = true;
            }

        }

        //Summary of the logic below
        // Go through each datafile with fNamePrefix:
        // + if a record index object for that datafile is present in hourlyIdx,
        //   and lastmod of that data file is older (less than) than hourlyIdxFileLastmod
        //   then no action is required
        // + else if a record index object for that datafile is NOT present in hourlyIdx,
        //   then build and merge that record index object into hourlyIdx, mark hourlyIdx as needsToBeStored
        // + else if a record index object for that datafile is present in hourlyIdx,
        //   and lastmod of that data file is newer (greater than) than hourlyIdxFileLastmod
        //   then build and replace that record index object in hourlyIdx, mark hourlyIdx as needsToBeStored

        RecordIndex recordIndex;
        Long dataFileLastMod;
        boolean hourlyIdxContainsIndex;
        for(String dataFileName: namesOfDataFiles){

            //determine what indexes need to be re-built for a given datafile
            List<InternalDirectoryIndex> indexesToRebuild = new ArrayList<>();
            for(InternalDirectoryIndex internalIdx: internalDirectoryIndexes){

                dataFileLastMod = fileNameToLastModMap.get(dataFileName);
                hourlyIdxContainsIndex = internalIdx.hourlyIdx.getDataFileNameToRecordIndexMap().get(dataFileName) != null;
                if(hourlyIdxContainsIndex){
                    //data file already present in the hourly index
                    if(dataFileLastMod < internalIdx.hourlyIdxFileLastmod) {
                        //nothing to do here, all up-to-date
                    } else {
                        // merge/replace record index for that new data file into hourlyIdx
                        internalIdx.needsToBeStored = true;
                        recordIndex = HierarchicalDatastore.findRecordIndex(hazelcastObjectsConfiguration.getRecordIndexMapPrefix(),
                                fNamePrefix+"-", hazelcastClient, internalIdx.indexName, dataFileName);

                        if(recordIndex==null){
                            //record index NOT found in hazelcast - possibly expired
                            //we'll build it on the fly
                            LOG.debug("Could not get content of record index {} for data file {} - building record index from scratch", internalIdx.indexName, dataFileName);
                            indexesToRebuild.add(internalIdx);
                        } else {
                            //record index found in hazelcast, will use it in the directory index
                            if(recordIndex.getCounts()!=null && recordIndex.getPositions()!=null){
                                internalIdx.hourlyIdx.getDataFileNameToRecordIndexMap().put(dataFileName, recordIndex);
                            } else {
                                LOG.error("Could not merge record index {} for data file {}", internalIdx.indexName, dataFileName);
                            }
                        }

                    }
                } else {
                    //data file is NOT present in the hourly index
                    // build that record index file and merge it into hourlyIdx
                    internalIdx.needsToBeStored = true;
                    indexesToRebuild.add(internalIdx);
                }
            }

            if(!indexesToRebuild.isEmpty()){
                //rebuild indexes for a given datafile, as determined above
                buildCountsAndPositions(dataFileName, fNamePrefix, indexesToRebuild);
            }

        }

        //now, after processing all datafiles, store those directory indexes that were marked as needsToBeStored
        for(InternalDirectoryIndex internalDirIdx: internalDirectoryIndexes){
            if(internalDirIdx.needsToBeStored){
                //store zipped hourlyIdx under name hourlyIdxFileName
                storeZippedModelInFile(internalDirIdx.hourlyIdxFileName, internalDirIdx.hourlyIdx);
            }
        }

        LOG.debug("completed buildHourlyIndexesForSingleFileNamePrefix({})", fNamePrefix);

    }


    private void storeZippedModelInFile(String fileName, BaseJsonModel model) {

        LOG.info("storing {} in {}/{}", model.getClass().getSimpleName(), dsRootDirName, fileName);

        byte[] collectedBytes;

        try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream(10 * 1024);
                ZipOutputStream zipOutputStream = new ZipOutputStream(outputStream)) {
            // entry name is without ".zip"
            ZipEntry entry = new ZipEntry(fileName.substring(fileName.lastIndexOf('/'), fileName.length() - 4));
            zipOutputStream.putNextEntry(entry);

            byte[] modelBytes = model.toString().getBytes(StandardCharsets.UTF_8);
            zipOutputStream.write(modelBytes);
            zipOutputStream.write(13);
            zipOutputStream.write(10);

            zipOutputStream.closeEntry();
            zipOutputStream.flush();
            zipOutputStream.finish();

            outputStream.flush();

            collectedBytes = outputStream.toByteArray();
        } catch (IOException e) {
            throw new GenericErrorException("Cannot write zip entry into " + fileName, e);
        }

        ByteArrayInputStream bais = new ByteArrayInputStream(collectedBytes);

        LOG.info("Upload started (size {}): {}/{}", collectedBytes.length, dsRootDirName, fileName);
        try(FileOutputStream fos = new FileOutputStream(new File(dsRootDirName, fileName))) {
            StreamUtils.copy(bais, fos);
            fos.flush();

            LOG.info("Upload complete: {}/{}", dsRootDirName, fileName);
        } catch (IOException e) {
            LOG.error("Unable to upload stream into {}/{}, upload was aborted", dsRootDirName, fileName, e);
        }
    }


    /**
     * Convenience class to group internal data structures for the record index
     * @author dtop
     *
     */
    private static class InternalRecordIndex{
        String indexName;
        RecordIndexCounts recordIndexCounts = new RecordIndexCounts();
        RecordIndexPositions recordIndexPositions = new RecordIndexPositions();
        RecordIndexValueExtractor valueExtractor;

        InternalRecordIndex(String fileNamePrefix, String indexName, RecordIndexRegistry recordIndexRegistry){
            this.indexName = indexName;
            recordIndexCounts.setName(indexName);
            recordIndexPositions.setName(indexName);
            valueExtractor = recordIndexRegistry.getIndexValueExtractor(fileNamePrefix, indexName);
        }

    }

    private void buildCountsAndPositions(String dataFileName, String fileNamePrefix, List<InternalDirectoryIndex> indexesToRebuild) {

        Map<String, InternalRecordIndex> recordIndexes = new HashMap<>();

        //initialize new record indexes
        for(InternalDirectoryIndex internalIdx: indexesToRebuild){
            recordIndexes.put(internalIdx.indexName, new InternalRecordIndex(fileNamePrefix, internalIdx.indexName, recordIndexRegistry));
        }

        LOG.debug("Building record indexes for {}", dataFileName);

        try(FileInputStream fis = new FileInputStream(new File(dsRootDirName, dataFileName))) {

            ZipEntry ze;
            String zipEntryName;

            try(ZipInputStream zis = new ZipInputStream(fis)){

                while ((ze=zis.getNextEntry())!=null){
                    zipEntryName = ze.getName();
                    LOG.trace("Processing zip entry {}", zipEntryName);
                    InputStreamReader isr = new InputStreamReader(zis, StandardCharsets.UTF_8);
                    BufferedReader br = new BufferedReader(isr);

                    int lineNum = 0;
                    Set<String> idxValues;

                    for(String line; (line = br.readLine()) != null; ) {

                        BaseJsonModel entity = null;
                        try{
                            entity = BaseJsonModel.fromString(line, BaseJsonModel.class);
                        }catch(Exception e){
                            LOG.debug("Could not deserialize entry {}", line);
                        }

                        if(entity!=null){
                            for(InternalRecordIndex iRecordIdx: recordIndexes.values()){
                                idxValues = iRecordIdx.valueExtractor.extractValues(entity);
                                for(String idxValue: idxValues){
                                    iRecordIdx.recordIndexCounts.incrementCountForValue(idxValue);
                                    iRecordIdx.recordIndexPositions.addPositionForValue(idxValue, lineNum);
                                }
                            }

                        }
                        lineNum++;

                    }

                    LOG.trace("Read {} entries", lineNum);

                }

            }

        } catch (FileNotFoundException e){
            LOG.trace("file {} does not exist", dataFileName);
            return;
        } catch (IOException e) {
            throw new GenericErrorException(e);
        }


        //merge freshly built record indexes into the supplied indexesToRebuild
        for(InternalDirectoryIndex internalIdx: indexesToRebuild){
            InternalRecordIndex iRecordIdx = recordIndexes.get(internalIdx.indexName);
            RecordIndex recordIndex = new RecordIndex(iRecordIdx.recordIndexCounts, iRecordIdx.recordIndexPositions);
            internalIdx.hourlyIdx.getDataFileNameToRecordIndexMap().put(dataFileName, recordIndex);
        }

        LOG.debug("Completed building record indexes for {}", dataFileName);

    }


    public static void main(String[] args) {

        RecordIndexRegistry rir = new RecordIndexRegistry();
        rir.postConstruct();
        HazelcastObjectsConfiguration hazelcastObjectsConfiguration = new HazelcastObjectsConfiguration();
        HazelcastInstance hazelcastClient = new HazelcastForUnitTest().hazelcastInstanceTest();
        HourlyIndexAggregatorHazelcastScalable hia = new HourlyIndexAggregatorHazelcastScalable(
                "/Users/dtop/hierarchical_ds", "dev1", rir, hazelcastClient, hazelcastObjectsConfiguration);

        long startTime = System.currentTimeMillis()
                - TimeUnit.DAYS.toMillis(30)
                - TimeUnit.HOURS.toMillis(8)
                ;

        long endTime = startTime + 1;

        long withIndexesTsStart = System.currentTimeMillis();
        //try with 1 hour and indexes existing
        hia.buildHourlyIndex(13, 834, startTime, endTime);
        long withIndexesTsEnd = System.currentTimeMillis();

        System.out.println("Took "+ (withIndexesTsEnd - withIndexesTsStart) + " ms to re-build hourly index");

        hazelcastClient.shutdown();
    }
}
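For completeness, here is a sketch of the reverse operation - reading one of these zipped model files back from the local root directory. It is illustrative only: the commit itself relies on HierarchicalDatastore.getZippedModelFromFile() for this (that file's diff is suppressed above), and the class name ReadZippedDirectoryIndexExample is made up.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.ZipInputStream;

import org.springframework.util.StreamUtils;

import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
import com.telecominfraproject.wlan.hierarchical.datastore.index.DirectoryIndex;

public class ReadZippedDirectoryIndexExample {

    public static DirectoryIndex readZippedDirectoryIndex(String dsRootDirName, String fileName) throws IOException {
        try (ZipInputStream zis = new ZipInputStream(new FileInputStream(new File(dsRootDirName, fileName)))) {
            if (zis.getNextEntry() == null) {
                // empty archive - nothing to deserialize
                return null;
            }
            // the single zip entry holds the json produced by BaseJsonModel.toString()
            String json = StreamUtils.copyToString(zis, StandardCharsets.UTF_8);
            return BaseJsonModel.fromString(json, DirectoryIndex.class);
        }
    }
}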
@@ -0,0 +1,234 @@
|
||||
package com.telecominfraproject.wlan.hierarchical.datastore.index.registry;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexValueExtractor;
|
||||
|
||||
/**
|
||||
* This class defines all record indexes used by hds in our system
|
||||
*
|
||||
* @author dtop
|
||||
*
|
||||
*/
|
||||
@Component
|
||||
public class RecordIndexRegistry {
|
||||
|
||||
//dtop: TODO: move these commented out parts into appropriate descendants of the hierarchical datastore
|
||||
/*
|
||||
public static enum EquipmentEventsIndex{
|
||||
eventType,
|
||||
clientMac
|
||||
}
|
||||
|
||||
public static enum SystemEventsIndex{
|
||||
payloadType,
|
||||
clientMac
|
||||
}
|
||||
|
||||
public static enum ServiceMetricsIndex{
|
||||
dataType,
|
||||
clientMac
|
||||
}
|
||||
|
||||
public static enum SingleValueMetricIndex{
|
||||
metricDataType;
|
||||
}
|
||||
|
||||
@Value("${whizcontrol.RawEquipmentEventDatastore.s3ds.fileNamePrefix:ree}")
|
||||
private String reeFileNamePrefix;
|
||||
|
||||
@Value("${whizcontrol.SystemEventDatastore.s3ds.fileNamePrefix:se}")
|
||||
private String seFileNamePrefix;
|
||||
|
||||
@Value("${whizcontrol.ServiceMetricsDatastore.s3ds.fileNamePrefix:sm}")
|
||||
private String smFileNamePrefix;
|
||||
*/
|
||||
|
||||
private Map<String, Map<String, RecordIndexValueExtractor>> fullIndexMap = new HashMap<>();
|
||||
private Set<String> fileNamePrefixes = new HashSet<>();
|
||||
|
||||
@PostConstruct
|
||||
public void postConstruct(){
|
||||
//we need property values to be resolved before we can initialize the internal maps
|
||||
|
||||
//dtop: TODO: move these commented out parts into appropriate descendants of the hierarchical datastore
|
||||
/*
|
||||
//for testing or running outside of spring environment
|
||||
reeFileNamePrefix = reeFileNamePrefix!=null?reeFileNamePrefix:"ree";
|
||||
seFileNamePrefix = seFileNamePrefix!=null?seFileNamePrefix:"se";
|
||||
smFileNamePrefix = smFileNamePrefix!=null?smFileNamePrefix:"sm";
|
||||
|
||||
Map<String, RecordIndexValueExtractor> reeIndexes = new HashMap<>();
|
||||
Map<String, RecordIndexValueExtractor> seIndexes = new HashMap<>();
|
||||
Map<String, RecordIndexValueExtractor> smIndexes = new HashMap<>();
|
||||
Map<String, RecordIndexValueExtractor> smSingleValueIndexes = new HashMap<>();
|
||||
|
||||
reeIndexes.put(EquipmentEventsIndex.eventType.toString(), new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
if(model instanceof BaseRawEquipmentEvent){
|
||||
return Collections.singleton(((BaseRawEquipmentEvent)model).getEventTypeName());
|
||||
}
|
||||
return Collections.singleton("");
|
||||
}
|
||||
});
|
||||
|
||||
reeIndexes.put(EquipmentEventsIndex.clientMac.toString(), new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
if(model instanceof ClientRawEventInterface ){
|
||||
ClientRawEventInterface crei = (ClientRawEventInterface) model;
|
||||
MacAddress mac = MacAddress.valueOf(crei.getDeviceMacAddress());
|
||||
|
||||
if(mac != null)
|
||||
{
|
||||
Long macAddress = mac.getAddressAsLong();
|
||||
|
||||
if(macAddress != null)
|
||||
{
|
||||
return Collections.singleton(macAddress.toString());
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return Collections.singleton("");
|
||||
}
|
||||
});
|
||||
|
||||
seIndexes.put(SystemEventsIndex.payloadType.toString(), new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
if(model instanceof SystemEvent){
|
||||
return Collections.singleton(model.getClass().getSimpleName());
|
||||
}
|
||||
return Collections.singleton("");
|
||||
}
|
||||
});
|
||||
|
||||
seIndexes.put(SystemEventsIndex.clientMac.toString(), new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
if(model instanceof ClientSystemEventInterface ){
|
||||
ClientSystemEventInterface csei = (ClientSystemEventInterface) model;
|
||||
MacAddress mac = MacAddress.valueOf(csei.getDeviceMacAddress());
|
||||
|
||||
if(mac != null)
|
||||
{
|
||||
Long macAddress = mac.getAddressAsLong();
|
||||
|
||||
if(macAddress != null)
|
||||
{
|
||||
return Collections.singleton(macAddress.toString());
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return Collections.singleton("");
|
||||
}
|
||||
});
|
||||
|
||||
smIndexes.put(ServiceMetricsIndex.dataType.toString(), new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
if(model instanceof SingleMetricRecord){
|
||||
return Collections.singleton(((SingleMetricRecord) model).getDataType());
|
||||
}
|
||||
return Collections.singleton("");
|
||||
}
|
||||
});
|
||||
|
||||
smIndexes.put(ServiceMetricsIndex.clientMac.toString(), new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
//here we are dealing with multi-value indexes - one record contains multiple MAC addresses
|
||||
if(model instanceof SingleMetricRecord){
|
||||
if(((SingleMetricRecord)model).getData() instanceof ClientMetricsInterface ){
|
||||
ClientMetricsInterface cmi = (ClientMetricsInterface) ((SingleMetricRecord)model).getData();
|
||||
Set<String> ret = new HashSet<>();
|
||||
|
||||
for(MacAddress mac: cmi.getDeviceMacAddresses())
|
||||
{
|
||||
Long macAddress = mac.getAddressAsLong();
|
||||
|
||||
if(macAddress != null)
|
||||
{
|
||||
ret.add(macAddress.toString());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if(!ret.isEmpty()){
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Collections.singleton("");
|
||||
}
|
||||
});
|
||||
|
||||
smSingleValueIndexes.put(SingleValueMetricIndex.metricDataType.toString(), new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
if(model instanceof SingleValueMetric){
|
||||
return Collections.singleton(((SingleValueMetric) model).getMetricId().toString());
|
||||
}
|
||||
return Collections.singleton("");
|
||||
}
|
||||
});
|
||||
|
||||
fullIndexMap.put(reeFileNamePrefix, reeIndexes);
|
||||
fullIndexMap.put(seFileNamePrefix, seIndexes);
|
||||
fullIndexMap.put(smFileNamePrefix, smIndexes);
|
||||
fullIndexMap.put(smFileNamePrefix + TieredAggregationTable.table_x5m.getTableSuffix(), smSingleValueIndexes);
|
||||
fullIndexMap.put(smFileNamePrefix + TieredAggregationTable.table_x15m.getTableSuffix(), smSingleValueIndexes);
|
||||
fullIndexMap.put(smFileNamePrefix + TieredAggregationTable.table_x30m.getTableSuffix(), smSingleValueIndexes);
|
||||
fullIndexMap.put(smFileNamePrefix + TieredAggregationTable.table_x1h.getTableSuffix(), smSingleValueIndexes);
|
||||
fullIndexMap.put(smFileNamePrefix + TieredAggregationTable.table_x4h.getTableSuffix(), smSingleValueIndexes);
|
||||
fullIndexMap.put(smFileNamePrefix + TieredAggregationTable.table_x24h.getTableSuffix(), smSingleValueIndexes);
|
||||
|
||||
//keep track of all known file name prefixes - used to identify data files
|
||||
fileNamePrefixes.add(reeFileNamePrefix);
fileNamePrefixes.add(seFileNamePrefix);
|
||||
fileNamePrefixes.add(smFileNamePrefix);
|
||||
fileNamePrefixes.add(smFileNamePrefix + TieredAggregationTable.table_x5m.getTableSuffix());
|
||||
fileNamePrefixes.add(smFileNamePrefix + TieredAggregationTable.table_x15m.getTableSuffix());
|
||||
fileNamePrefixes.add(smFileNamePrefix + TieredAggregationTable.table_x30m.getTableSuffix());
|
||||
fileNamePrefixes.add(smFileNamePrefix + TieredAggregationTable.table_x1h.getTableSuffix());
|
||||
fileNamePrefixes.add(smFileNamePrefix + TieredAggregationTable.table_x4h.getTableSuffix());
|
||||
fileNamePrefixes.add(smFileNamePrefix + TieredAggregationTable.table_x24h.getTableSuffix());
|
||||
*/
|
||||
}

public Map<String, RecordIndexValueExtractor> getIndexMap(String fileNamePrefix){
Map<String, RecordIndexValueExtractor> ret = fullIndexMap.get(fileNamePrefix);
if(ret == null){
ret = new HashMap<>();
fullIndexMap.put(fileNamePrefix, ret);
}
return ret;
}

public Set<String> getAllIndexesForFileNamePrefix(String fileNamePrefix){
return getIndexMap(fileNamePrefix).keySet();
}

public RecordIndexValueExtractor getIndexValueExtractor(String fileNamePrefix, String indexName){
Map<String, RecordIndexValueExtractor> idxMap = getIndexMap(fileNamePrefix);
return idxMap!=null?idxMap.get(indexName):null;
}

public Set<String> getAllFileNamePrefixes(){
return fileNamePrefixes;
}
}
|
||||
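Since the concrete index definitions are intentionally left to descendants of the hierarchical datastore, callers (as the unit test below does) can register their own extractors through getIndexMap(). A minimal sketch, assuming a hypothetical model class MyModel with a getRecordType() accessor and an illustrative file name prefix:

RecordIndexRegistry registry = new RecordIndexRegistry();
registry.postConstruct();

registry.getIndexMap("myPrefix").put("recordType", new RecordIndexValueExtractor() {
    @Override
    public Set<String> extractValues(BaseJsonModel model) {
        if(model instanceof MyModel){
            return Collections.singleton(((MyModel) model).getRecordType());
        }
        return Collections.singleton("");
    }
});
// make the prefix known so its data files are recognized
registry.getAllFileNamePrefixes().add("myPrefix");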
@@ -0,0 +1,92 @@
package com.telecominfraproject.wlan.hierarchical.datastore.writer;

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
import com.telecominfraproject.wlan.hierarchical.datastore.HierarchicalDatastore;
import com.telecominfraproject.wlan.server.exceptions.GenericErrorException;

/**
 * QueueReader picks up objects from the queue and writes them into zip streams in memory - one stream per N minutes, according to HierarchicalDatastore settings.<br>
 * Zip streams will be flushed into files after they have been idle for X minutes, according to HierarchicalDatastore settings.<br>
 *
 * @author dtop
 *
 */
public class QueueReader {

    private static final Logger LOG = LoggerFactory.getLogger(QueueReader.class);

    @SuppressWarnings("serial")
    static final BaseJsonModel poisonPill = new BaseJsonModel(){};

    private final int customerId;
    private final long equipmentId;
    private final String fileNamePrefix;
    private final BlockingQueue<BaseJsonModel> queue = new ArrayBlockingQueue<>(5000);
    private final Thread queueReaderThread;
    private boolean shutdownRequested;

    public QueueReader(HierarchicalDatastore hierarchicalDatastore, int customerId, long equipmentId, long idleTimeoutBeforeFlushingMs) {
        this.customerId = customerId;
        this.equipmentId = equipmentId;
        this.fileNamePrefix = hierarchicalDatastore.getFileNamePrefix();

        queueReaderThread = new Thread(new QueueReaderRunnable(queue, hierarchicalDatastore, customerId, equipmentId, idleTimeoutBeforeFlushingMs),
                "queueReader_"+fileNamePrefix+"_"+customerId+"_"+equipmentId+"_"+System.currentTimeMillis());

        //This thread has to be non-daemon because we need it alive when the shutdown hook
        // runs - to process poison pills and perform the flush to files
        queueReaderThread.setDaemon(false);

    }

    /**
     * Start reading messages from the queue
     */
    public void start(){
        queueReaderThread.start();
    }

    public int getQueueSize(){
        return queue.size();
    }

    public boolean isAlive(){
        return queueReaderThread.isAlive();
    }

    /**
     * Shutdown and flush to files all existing streams.
     * Actual shutdown and flush is performed when the poison pill is read by the queueReaderThread.
     */
    public void shutdown(){
        if(!shutdownRequested){
            shutdownRequested = true;
            LOG.info("Shutting down queue {}_{}_{}", fileNamePrefix, customerId, equipmentId);
            addToQueue(poisonPill);
        }
    }

    public void addToQueue(BaseJsonModel model){
        LOG.trace("Adding model to queue({}_{}_{}) {}", fileNamePrefix, customerId, equipmentId, model);
        try {
            queue.put(model);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new GenericErrorException("Interrupted while trying to insert model into a queue " + fileNamePrefix + " for customer "+customerId+" equipment "+equipmentId, e);
        }
    }

    /**
     * @return true if shutdown was requested and the queue reader thread has flushed all streams to S3 and exited
     */
    public boolean isShutdownCompleted() {
        return shutdownRequested && !queueReaderThread.isAlive();
    }

}
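A minimal usage sketch for the class above; hierarchicalDatastore, model and the wait loop are placeholders for whatever the surrounding application provides:

QueueReader reader = new QueueReader(hierarchicalDatastore, customerId, equipmentId,
        hierarchicalDatastore.getIdleTimeoutBeforeFlushingMs());
reader.start();

// hot path: enqueue models as they arrive; the reader thread groups them into zip streams
reader.addToQueue(model);

// shutdown: enqueue the poison pill, then wait until all streams are flushed to files
reader.shutdown();
while(!reader.isShutdownCompleted()){
    Thread.sleep(100); // InterruptedException handling omitted in this sketch
}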
@@ -0,0 +1,179 @@
|
||||
package com.telecominfraproject.wlan.hierarchical.datastore.writer;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Calendar;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.TimeZone;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.netflix.servo.monitor.Counter;
|
||||
import com.telecominfraproject.wlan.cloudmetrics.CloudMetricsUtils;
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
import com.telecominfraproject.wlan.core.model.json.interfaces.HasCustomerId;
|
||||
import com.telecominfraproject.wlan.core.model.json.interfaces.HasEquipmentId;
|
||||
import com.telecominfraproject.wlan.core.model.json.interfaces.HasProducedTimestamp;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.HierarchicalDatastore;
|
||||
|
||||
/**
|
||||
* @author dtop
|
||||
*
|
||||
* This class reads models from a customer_equipment queue, writes them
|
||||
* into appropriate zipStreams according to model timestamps, and
|
||||
* triggers flushes of those streams to files.
|
||||
*
|
||||
*/
|
||||
public class QueueReaderRunnable implements Runnable {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(QueueReaderRunnable.class);
|
||||
|
||||
private final Map<Long, StreamHolder> streamMap = new HashMap<>();
|
||||
|
||||
private final int customerId;
|
||||
private final long equipmentId;
|
||||
private final BlockingQueue<BaseJsonModel> queue;
|
||||
private final String servoMetricPrefix;
|
||||
private final Counter processedModelCounter;
|
||||
private final HierarchicalDatastore hierarchicalDatastore;
|
||||
private final long idleTimeoutBeforeFlushingMs;
|
||||
|
||||
public QueueReaderRunnable(BlockingQueue<BaseJsonModel> queue, HierarchicalDatastore hierarchicalDatastore, int customerId, long equipmentId, long idleTimeoutBeforeFlushingMs) {
|
||||
this.queue = queue;
|
||||
this.customerId = customerId;
|
||||
this.equipmentId = equipmentId;
|
||||
this.servoMetricPrefix = "hdsQueueReader-"+hierarchicalDatastore.getDsRootDirName()+"-"+hierarchicalDatastore.getDsPrefix()+"-"+hierarchicalDatastore.getFileNamePrefix()+"-";
|
||||
this.processedModelCounter = CloudMetricsUtils.getCounter(servoMetricPrefix+"processedModel-count");
|
||||
this.idleTimeoutBeforeFlushingMs = idleTimeoutBeforeFlushingMs;
|
||||
this.hierarchicalDatastore = hierarchicalDatastore;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
try{
|
||||
LOG.info("Started QueueReader thread {} ", Thread.currentThread().getName());
|
||||
while(true){
|
||||
BaseJsonModel model = null;
|
||||
try {
|
||||
model = queue.poll(idleTimeoutBeforeFlushingMs, TimeUnit.MILLISECONDS);
|
||||
|
||||
if(model!=null){
|
||||
LOG.trace("Got from queue({}:{}) {}", customerId, equipmentId, model);
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
// restore the interrupt flag and keep going; shutdown is handled via the poison pill
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
|
||||
if(model == QueueReader.poisonPill){
|
||||
//stop work and exit
|
||||
break;
|
||||
}
|
||||
|
||||
try{
|
||||
|
||||
if(model==null){
|
||||
//did not read anything from the queue after idleTimeout of waiting.
|
||||
//Check if existing zipStreams need to be flushed into files.
|
||||
//Although we did not read anything from the queue here, we do not want to wait too long until
|
||||
//the next model appears in the queue before we flush existing streams to files.
|
||||
//That is why we flush to files once idleTimeout has elapsed since the last model was read from the queue, no matter what.
|
||||
commitOutputStreamsToFiles(false);
|
||||
|
||||
//wait for the next message in the queue
|
||||
continue;
|
||||
}
|
||||
|
||||
if( !( model instanceof HasCustomerId
|
||||
&& model instanceof HasEquipmentId
|
||||
&& model instanceof HasProducedTimestamp
|
||||
)){
|
||||
LOG.debug("Not enough information to store this model {}, will skip it. Model has to provide customer id, equipment id and timestamp.", model.getClass());
|
||||
//wait for the next message in the queue
|
||||
continue;
|
||||
}
|
||||
|
||||
long modelTs = ((HasProducedTimestamp) model).getProducedTimestampMs();
|
||||
|
||||
//determine the in-memory stream to write the model to
|
||||
//first normalize timestamp to n minutes - per hDatastore configuration
|
||||
long normalizedModelTs = modelTs - modelTs%(1L*hierarchicalDatastore.getNumberOfMinutesPerFile()*60*1000);
|
||||
//then find the stream from the normalized timestamp
|
||||
StreamHolder streamHolder = streamMap.get(normalizedModelTs);
|
||||
|
||||
//create stream if needed - only one thread is doing this
|
||||
if(streamHolder == null){
|
||||
streamHolder = new StreamHolder(modelTs, customerId, equipmentId, hierarchicalDatastore);
|
||||
streamMap.put(streamHolder.getStreamKey(), streamHolder);
|
||||
}
|
||||
|
||||
streamHolder.writeModelToStream(model);
|
||||
|
||||
processedModelCounter.increment();
|
||||
|
||||
//flush only idle streams to files
|
||||
commitOutputStreamsToFiles(false);
|
||||
|
||||
}catch(Exception e){
|
||||
LOG.error("Exception when writing into stream", e);
|
||||
}
|
||||
}
|
||||
|
||||
//unconditionally flush the remainder of streams into files before exiting
|
||||
try {
|
||||
commitOutputStreamsToFiles(true);
|
||||
} catch (IOException e) {
|
||||
LOG.error("Exception when writing into stream", e);
|
||||
}
|
||||
}catch(Exception e){
|
||||
LOG.error("Got exception: ",e);
|
||||
}
|
||||
|
||||
LOG.info("Thread exited {}", Thread.currentThread().getName());
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Check all open streams, and upload them to files if they were idle for longer than idleTimeoutBeforeFlushingMs.
|
||||
* @param forceFlush - if true, then unconditionally flush all existing streams to files
|
||||
* @throws IOException
|
||||
*/
|
||||
private void commitOutputStreamsToFiles(boolean forceFlush) throws IOException {
|
||||
StreamHolder streamHolder;
|
||||
Map.Entry<Long, StreamHolder> mapEntry;
|
||||
Iterator<Map.Entry<Long, StreamHolder>> iter = streamMap.entrySet().iterator();
|
||||
Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
|
||||
|
||||
while(iter.hasNext()){
|
||||
mapEntry = iter.next();
|
||||
streamHolder = mapEntry.getValue();
|
||||
if(forceFlush || (System.currentTimeMillis() - streamHolder.getLastModelWrittenToStreamTimestampMs()) >= idleTimeoutBeforeFlushingMs ){
|
||||
//stream was idle long enough, can flush it to file now
|
||||
streamHolder.commitOutputStreamToFile();
|
||||
|
||||
//stream is uploaded to file, no need to keep it in memory anymore
|
||||
iter.remove();
|
||||
|
||||
//now we can update fileCreatedTimestampsForInterval in hazelcast - append new timestamp for just-uploaded-stream to it
|
||||
calendar.setTime(new Date(streamHolder.getZipStreamStartTimeMs()));
|
||||
int year = calendar.get(Calendar.YEAR);
|
||||
int month = calendar.get(Calendar.MONTH) + 1;
|
||||
int day = calendar.get(Calendar.DAY_OF_MONTH);
|
||||
int hour = calendar.get(Calendar.HOUR_OF_DAY);
|
||||
//int minute = calendar.get(Calendar.MINUTE);
|
||||
|
||||
hierarchicalDatastore.appendFileNameToDirectoryListing(customerId, equipmentId, year, month, day, hour, streamHolder.getFullFileName());
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
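The stream selection above hinges on truncating the model timestamp down to the start of its N-minute window, so every model produced within the same window lands in the same StreamHolder and, eventually, the same data file. A worked example of that arithmetic with illustrative numbers:

long numberOfMinutesPerFile = 5;                           // illustrative; comes from HierarchicalDatastore config
long bucketMs = 1L * numberOfMinutesPerFile * 60 * 1000;   // 300000 ms per file

long modelTs = 1500000123456L;                             // produced timestamp of an incoming model
long normalizedModelTs = modelTs - modelTs % bucketMs;     // 1500000000000 - start of the 5-minute window

// any model with a timestamp in [1500000000000, 1500000300000) maps to the same stream key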
@@ -0,0 +1,157 @@
|
||||
package com.telecominfraproject.wlan.hierarchical.datastore.writer;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.netflix.servo.monitor.Counter;
|
||||
import com.telecominfraproject.wlan.cloudmetrics.CloudMetricsUtils;
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
import com.telecominfraproject.wlan.core.model.json.interfaces.HasCustomerId;
|
||||
import com.telecominfraproject.wlan.core.model.json.interfaces.HasEquipmentId;
|
||||
import com.telecominfraproject.wlan.core.model.json.interfaces.HasProducedTimestamp;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.HierarchicalDatastore;
|
||||
|
||||
/**
|
||||
* This component takes incoming BaseJsonModel, extracts partitioning key from
|
||||
* it (usually customerId_equipmentId), and delivers BaseJsonModel into
|
||||
* appropriate queue.<br>
|
||||
* QueueReaders will pick up objects from the queues and write them into zip streams in memory.<br>
|
||||
* Zip streams will be flushed into files every n minutes - according to hDatastore configuration.<br>
|
||||
*
|
||||
* This object instance is shared between all threads that read from kafka.
|
||||
*
|
||||
* @author dtop
|
||||
*
|
||||
*/
|
||||
public class RecordToQueueMapper {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(RecordToQueueMapper.class);
|
||||
|
||||
private Map<String, QueueReader> partitionedQueues = new ConcurrentHashMap<>();
|
||||
|
||||
private final HierarchicalDatastore hierarchicalDatastore;
|
||||
private final String servoMetricPrefix;
|
||||
private final Counter addModelCounter;
|
||||
private boolean shutdownRequested;
|
||||
|
||||
public RecordToQueueMapper(HierarchicalDatastore hierarchicalDatastore) {
|
||||
this.hierarchicalDatastore = hierarchicalDatastore;
|
||||
this.servoMetricPrefix = "hdsQueueReader-"+hierarchicalDatastore.getDsRootDirName()+"-"+hierarchicalDatastore.getDsPrefix()+"-"+hierarchicalDatastore.getFileNamePrefix()+"-";
|
||||
this.addModelCounter = CloudMetricsUtils.getCounter(servoMetricPrefix+"addModel-count");
|
||||
|
||||
CloudMetricsUtils.registerGauge(servoMetricPrefix+"numQueues",
|
||||
new Callable<Long>(){
|
||||
@Override
|
||||
public Long call() throws Exception {
|
||||
return (long) partitionedQueues.size();
|
||||
}
|
||||
});
|
||||
|
||||
CloudMetricsUtils.registerGauge(servoMetricPrefix+"totalQueueSize",
|
||||
new Callable<Long>(){
|
||||
@Override
|
||||
public Long call() throws Exception {
|
||||
long s = 0;
|
||||
try{
|
||||
for(QueueReader qr: partitionedQueues.values()){
|
||||
s+=qr.getQueueSize();
|
||||
}
|
||||
}catch(Exception e){
|
||||
//ignore it, will repeat at next metrics poll cycle
|
||||
}
|
||||
return s;
|
||||
}
|
||||
});
|
||||
|
||||
CloudMetricsUtils.registerGauge(servoMetricPrefix+"numDeadThreads",
|
||||
new Callable<Long>(){
|
||||
@Override
|
||||
public Long call() throws Exception {
|
||||
long s = 0;
|
||||
try{
|
||||
for(QueueReader qr: partitionedQueues.values()){
|
||||
if(!qr.isAlive()){
|
||||
s++;
|
||||
}
|
||||
}
|
||||
}catch(Exception e){
|
||||
//ignore it, will repeat at next metrics poll cycle
|
||||
}
|
||||
return s;
|
||||
}
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
public void addModel(BaseJsonModel model){
|
||||
|
||||
if( !( model instanceof HasCustomerId
|
||||
&& model instanceof HasEquipmentId
|
||||
&& model instanceof HasProducedTimestamp
|
||||
)){
|
||||
LOG.debug("Not enough information to store this model {}, will skip it. Model has to provide customer id, equipment id and timestamp.", model.getClass());
|
||||
//skip this model
|
||||
return;
|
||||
}
|
||||
|
||||
int customerId = ((HasCustomerId)model).getCustomerId();
|
||||
long equipmentId = ((HasEquipmentId)model).getEquipmentId();
|
||||
|
||||
String partitionKey = Integer.toString(customerId) + "_" + Long.toString(equipmentId);
|
||||
|
||||
QueueReader queueReader = partitionedQueues.get(partitionKey);
|
||||
if(queueReader == null){
|
||||
synchronized(partitionedQueues){
|
||||
queueReader = partitionedQueues.get(partitionKey);
|
||||
if(queueReader==null){
|
||||
queueReader = new QueueReader(hierarchicalDatastore, customerId, equipmentId, hierarchicalDatastore.getIdleTimeoutBeforeFlushingMs());
|
||||
partitionedQueues.put(partitionKey, queueReader);
|
||||
queueReader.start();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
addModelCounter.increment();
|
||||
queueReader.addToQueue(model);
|
||||
}
|
||||
|
||||
/**
|
||||
* Shutdown and flush to files all existing queue readers
|
||||
*/
|
||||
public void shutdown(){
|
||||
if(!shutdownRequested){
|
||||
shutdownRequested = true;
|
||||
for(QueueReader qReader: partitionedQueues.values()){
|
||||
qReader.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public boolean isShutdownRequested(){
|
||||
return shutdownRequested;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if shutdown was requested and all queue readers have flushed to files
|
||||
*/
|
||||
public boolean isShutdownCompleted() {
|
||||
if(!shutdownRequested){
|
||||
return false;
|
||||
}
|
||||
|
||||
boolean shutdownDone = true;
|
||||
for(Map.Entry<String,QueueReader> entry: partitionedQueues.entrySet()){
|
||||
shutdownDone = shutdownDone && entry.getValue().isShutdownCompleted();
|
||||
LOG.info("Shutdown status for QueueReader {} : {}", entry.getKey(), entry.getValue().isShutdownCompleted());
|
||||
if(!shutdownDone){
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return shutdownDone;
|
||||
}
|
||||
}
|
||||
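A minimal sketch of how a consumer loop could hand records to the mapper and drain it on shutdown; the records collection stands in for whatever the kafka consumer delivers:

RecordToQueueMapper mapper = new RecordToQueueMapper(hierarchicalDatastore);

// hot path: route every deserialized record to its customerId_equipmentId queue
for(BaseJsonModel record : records){
    mapper.addModel(record);
}

// shutdown hook: ask every QueueReader to flush, then wait until they have all exited
mapper.shutdown();
while(!mapper.isShutdownCompleted()){
    Thread.sleep(100); // InterruptedException handling omitted in this sketch
}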
@@ -0,0 +1,198 @@
|
||||
package com.telecominfraproject.wlan.hierarchical.datastore.writer;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.zip.ZipEntry;
|
||||
import java.util.zip.ZipOutputStream;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.HierarchicalDatastore;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexCounts;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexPositions;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexValueExtractor;
|
||||
import com.telecominfraproject.wlan.server.exceptions.GenericErrorException;
|
||||
|
||||
/**
|
||||
* @author dtop
|
||||
* Class that holds a single zipStream and related properties.
|
||||
* Responsible for creation of the stream, writing models into it, and uploading stream to s3.
|
||||
*/
|
||||
public class StreamHolder {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(StreamHolder.class);
|
||||
|
||||
private ByteArrayOutputStream outputStream;
|
||||
private ZipOutputStream zipOutputStream;
|
||||
private long zipStreamStartTimeMs;
|
||||
|
||||
private final int customerId;
|
||||
private final long equipmentId;
|
||||
private final HierarchicalDatastore hierarchicalDatastore;
|
||||
|
||||
private int bytesWrittenToPart;
|
||||
private int lineNumber;
|
||||
private String fullFileName;
|
||||
private String partFileName;
|
||||
private long timestampFromFileName;
|
||||
private long lastModelWrittenToStreamTimestampMs = System.currentTimeMillis();
|
||||
|
||||
private Map<String, RecordIndexCounts> indexCountsMap = new HashMap<>();
|
||||
private Map<String, RecordIndexPositions> indexPositionsMap = new HashMap<>();
|
||||
|
||||
public StreamHolder(long streamFirstModelStartTimeMs, int customerId, long equipmentId, HierarchicalDatastore hierarchicalDatastore){
|
||||
this.customerId = customerId;
|
||||
this.equipmentId = equipmentId;
|
||||
this.hierarchicalDatastore = hierarchicalDatastore;
|
||||
|
||||
this.zipStreamStartTimeMs = streamFirstModelStartTimeMs;
|
||||
//normalize timestamp to n minutes - per HDatastore configuration
|
||||
this.zipStreamStartTimeMs = zipStreamStartTimeMs - zipStreamStartTimeMs%(1L*hierarchicalDatastore.getNumberOfMinutesPerFile()*60*1000);
|
||||
|
||||
this.fullFileName = hierarchicalDatastore.getFileNameForNewFile(customerId, equipmentId, zipStreamStartTimeMs);
|
||||
//extract file name - from the last '/' until '.zip'
|
||||
this.partFileName = fullFileName.substring(fullFileName.lastIndexOf('/')+1, fullFileName.length()-4);
|
||||
//extract timestamp from the file name - number after the last '_'
|
||||
this.timestampFromFileName = Long.parseLong(partFileName.substring(partFileName.lastIndexOf('_')+1));
|
||||
|
||||
this.outputStream = new ByteArrayOutputStream(5*1024);
|
||||
this.zipOutputStream = new ZipOutputStream(outputStream);
|
||||
ZipEntry entry = new ZipEntry(partFileName);
|
||||
|
||||
try {
|
||||
this.zipOutputStream.putNextEntry(entry);
|
||||
} catch (IOException e) {
|
||||
throw new GenericErrorException("Cannot write first zip entry into "+partFileName, e);
|
||||
}
|
||||
|
||||
LOG.info("Created new file {}", partFileName);
|
||||
|
||||
}
|
||||
|
||||
public long getStreamKey(){
|
||||
return zipStreamStartTimeMs;
|
||||
}
|
||||
|
||||
public long getTimestampFromFileName(){
|
||||
return timestampFromFileName;
|
||||
}
|
||||
|
||||
public void commitOutputStreamToFile() throws IOException {
|
||||
|
||||
if(outputStream == null){
|
||||
//nothing to do here
|
||||
return;
|
||||
}
|
||||
|
||||
LOG.info("Closing existing stream from queue({}_{}_{})", hierarchicalDatastore.getFileNamePrefix(), customerId, equipmentId);
|
||||
|
||||
zipOutputStream.closeEntry();
|
||||
zipOutputStream.flush();
|
||||
zipOutputStream.close();
|
||||
|
||||
outputStream.flush();
|
||||
|
||||
//write into file only if at least one record was put into output stream
|
||||
//otherwise - just close existing stream
|
||||
if(bytesWrittenToPart > 0){
|
||||
//write collected bytes into file
|
||||
byte[] collectedBytes = outputStream.toByteArray();
|
||||
ByteArrayInputStream bais = new ByteArrayInputStream(collectedBytes);
|
||||
hierarchicalDatastore.uploadStreamToFileOverwriteOld(bais, collectedBytes.length, fullFileName);
|
||||
LOG.trace("Uploaded to s3 {}", fullFileName);
|
||||
|
||||
}
|
||||
|
||||
outputStream.close();
|
||||
outputStream = null;
|
||||
zipOutputStream = null;
|
||||
|
||||
|
||||
//now write accumulated values for all registered indexes - one file per index, not compressed
|
||||
Map<String, RecordIndexValueExtractor> allIndexes = hierarchicalDatastore.getRecordIndexes();
|
||||
for(Map.Entry<String, RecordIndexValueExtractor> idxEntry: allIndexes.entrySet()){
|
||||
String idxName = idxEntry.getKey();
|
||||
|
||||
//for every index we'll store record counts and record line numbers in the data file
|
||||
RecordIndexCounts idxCounts = indexCountsMap.get(idxName);
|
||||
if(idxCounts==null){
|
||||
idxCounts = new RecordIndexCounts();
|
||||
idxCounts.setName(idxName);
|
||||
}
|
||||
|
||||
RecordIndexPositions idxPositions = indexPositionsMap.get(idxName);
|
||||
if(idxPositions==null){
|
||||
idxPositions = new RecordIndexPositions();
|
||||
idxPositions.setName(idxName);
|
||||
}
|
||||
|
||||
hierarchicalDatastore.storeRecordIndex(idxName, idxCounts, idxPositions, fullFileName);
|
||||
|
||||
LOG.trace("Uploaded index {} for {}", idxName, fullFileName);
|
||||
}
|
||||
|
||||
indexCountsMap.clear();
|
||||
indexPositionsMap.clear();
|
||||
|
||||
}
|
||||
|
||||
public void writeModelToStream(BaseJsonModel model) throws IOException {
|
||||
|
||||
byte[] modelBytes = model.toString().getBytes(StandardCharsets.UTF_8);
|
||||
zipOutputStream.write(modelBytes);
|
||||
zipOutputStream.write(13);
|
||||
zipOutputStream.write(10);
|
||||
|
||||
bytesWrittenToPart += modelBytes.length + 2;
|
||||
|
||||
lastModelWrittenToStreamTimestampMs = System.currentTimeMillis();
|
||||
|
||||
//process all record indexes registered in appropriate HDS
|
||||
Map<String, RecordIndexValueExtractor> allIndexes = hierarchicalDatastore.getRecordIndexes();
|
||||
for(Map.Entry<String, RecordIndexValueExtractor> idxEntry: allIndexes.entrySet()){
|
||||
String idxName = idxEntry.getKey();
|
||||
RecordIndexValueExtractor valueExtractor = idxEntry.getValue();
|
||||
|
||||
//for every index we'll update record counts and record line numbers in the data file
|
||||
RecordIndexCounts idxCounts = indexCountsMap.get(idxName);
|
||||
if(idxCounts==null){
|
||||
idxCounts = new RecordIndexCounts();
|
||||
idxCounts.setName(idxName);
|
||||
indexCountsMap.put(idxName, idxCounts);
|
||||
}
|
||||
|
||||
RecordIndexPositions idxPositions = indexPositionsMap.get(idxName);
|
||||
if(idxPositions==null){
|
||||
idxPositions = new RecordIndexPositions();
|
||||
idxPositions.setName(idxName);
|
||||
indexPositionsMap.put(idxName, idxPositions);
|
||||
}
|
||||
|
||||
for(String idxValue : valueExtractor.extractValues(model)) {
|
||||
idxCounts.incrementCountForValue(idxValue);
|
||||
idxPositions.addPositionForValue(idxValue, lineNumber);
|
||||
}
|
||||
}
|
||||
|
||||
//move to the next line in the data file
|
||||
lineNumber++;
|
||||
}
|
||||
|
||||
public long getLastModelWrittenToStreamTimestampMs() {
|
||||
return lastModelWrittenToStreamTimestampMs;
|
||||
}
|
||||
|
||||
public long getZipStreamStartTimeMs() {
|
||||
return zipStreamStartTimeMs;
|
||||
}
|
||||
|
||||
public String getFullFileName() {
|
||||
return fullFileName;
|
||||
}
|
||||
}
|
||||
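The unit test below exercises this class directly; the short version is that a holder is created for the normalized start time of its window, written to, and committed exactly once. A minimal sketch, with firstModelTimestampMs, model1 and model2 as placeholders:

StreamHolder holder = new StreamHolder(firstModelTimestampMs, customerId, equipmentId, hierarchicalDatastore);
holder.writeModelToStream(model1);   // appends JSON + CRLF and updates counts/positions for every registered index
holder.writeModelToStream(model2);
holder.commitOutputStreamToFile();   // uploads the zip file plus one index file per registered index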
@@ -0,0 +1,631 @@
|
||||
package com.telecominfraproject.wlan.hierarchical.datastore;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Calendar;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.TimeZone;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.boot.test.context.SpringBootTest.WebEnvironment;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.context.support.PropertySourcesPlaceholderConfigurer;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.junit4.SpringRunner;
|
||||
|
||||
import com.google.common.io.Files;
|
||||
import com.hazelcast.core.HazelcastInstance;
|
||||
import com.hazelcast.core.IMap;
|
||||
import com.telecominfraproject.wlan.hazelcast.HazelcastForUnitTest;
|
||||
import com.telecominfraproject.wlan.hazelcast.HazelcastForUnitTest.HazelcastUnitTestManager;
|
||||
import com.telecominfraproject.wlan.hazelcast.common.HazelcastObjectsConfiguration;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.HourlyIndexFileNames;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.HierarchicalDatastore;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.index.DirectoryIndex;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexValueExtractor;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.index.registry.RecordIndexRegistry;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.writer.StreamHolder;
|
||||
import com.telecominfraproject.wlan.core.model.filter.EntryFilter;
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
|
||||
/**
|
||||
* @author dtoptygin
|
||||
*
|
||||
*/
|
||||
@RunWith(SpringRunner.class)
|
||||
@SpringBootTest(webEnvironment = WebEnvironment.NONE, classes = HierarchicalDatastoreHourlyIndexTests.class)
|
||||
@Import(value = {
|
||||
HazelcastForUnitTest.class,
|
||||
RecordIndexRegistry.class,
|
||||
HazelcastObjectsConfiguration.class,
|
||||
PropertySourcesPlaceholderConfigurer.class, //must have this to resolve non-string @Value annotations, i.e. int properties, etc.
|
||||
})
|
||||
@ActiveProfiles({"HazelcastForUnitTest"})
|
||||
public class HierarchicalDatastoreHourlyIndexTests {
|
||||
|
||||
static{
|
||||
System.setProperty("tip.wlan.hdsExecutorQueueSize", "5000");
|
||||
System.setProperty("tip.wlan.hdsExecutorThreads", "10");
|
||||
System.setProperty("tip.wlan.hdsExecutorCoreThreadsFactor", "1");
|
||||
HazelcastUnitTestManager.initializeSystemProperty(HierarchicalDatastoreHourlyIndexTests.class);
|
||||
}
|
||||
|
||||
static final HazelcastUnitTestManager testManager = new HazelcastUnitTestManager();
|
||||
|
||||
@Autowired
|
||||
public void setHazelcastInstance(HazelcastInstance hazelcastInstance) {
|
||||
this.hazelcastInstance = hazelcastInstance;
|
||||
testManager.registerInstance(hazelcastInstance);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void shutdown() {
|
||||
testManager.shutdownAllInstances();
|
||||
}
|
||||
|
||||
private static final String dsRootDirName = "hds-test";
|
||||
private static final String dsPrefix = "testDsHIT";
|
||||
|
||||
String fileNamePrefix = "testF";
|
||||
String hazelcastMapPrefix = fileNamePrefix+"-";
|
||||
int numberOfMinutesPerFile = 1;
|
||||
|
||||
private HazelcastInstance hazelcastInstance;
|
||||
@Autowired HazelcastObjectsConfiguration hazelcastObjectsConfiguration;
|
||||
|
||||
@Autowired RecordIndexRegistry recordIndexRegistry;
|
||||
|
||||
HierarchicalDatastore hDs;
|
||||
|
||||
ExecutorService executor = Executors.newFixedThreadPool(8, new ThreadFactory(){
|
||||
int cnt;
|
||||
@Override
|
||||
public Thread newThread(Runnable r) {
|
||||
Thread thr = new Thread(r, "UnitTest-HierarchicalDatastoreTests-"+(cnt++));
|
||||
thr.setDaemon(true);
|
||||
return thr;
|
||||
}
|
||||
});
|
||||
|
||||
@AfterClass
|
||||
public static void removeAllHdsFiles(){
|
||||
File rootDir = new File(dsRootDirName + File.separator + dsPrefix);
|
||||
if(rootDir.getAbsolutePath().equals("/")) {
|
||||
throw new IllegalArgumentException("attempting to delete / - please make sure your dsRootDirName and ds Prefix are not empty strings!");
|
||||
}
|
||||
|
||||
for(File f : Files.fileTreeTraverser().postOrderTraversal(rootDir)) {
|
||||
f.delete();
|
||||
}
|
||||
|
||||
rootDir.delete();
|
||||
}
|
||||
|
||||
@PostConstruct
|
||||
void initHds(){
|
||||
//remove previous datastore content, if any
|
||||
removeAllHdsFiles();
|
||||
|
||||
hDs = new HierarchicalDatastore(dsRootDirName, dsPrefix, fileNamePrefix, numberOfMinutesPerFile, 20L,
|
||||
hazelcastInstance, hazelcastMapPrefix, hazelcastObjectsConfiguration, recordIndexRegistry);
|
||||
}
|
||||
|
||||
|
||||
private final static String recordTypeIdx = "recordType";
|
||||
private final static String clientIdx = "client";
|
||||
private final static String manyClientIdx = "manyClient";
|
||||
|
||||
@Test
|
||||
public void testNormalOneIndex() throws IOException{
|
||||
|
||||
//create 2 data files in s3ds, 2 records each - one with record index, one without any indexes
|
||||
|
||||
String type1 = "t1";
|
||||
String type2 = "t2";
|
||||
String type3 = "t3";
|
||||
String type4 = "t4";
|
||||
String type5 = "t5";
|
||||
String client1 = "c1";
|
||||
String client2 = "c2";
|
||||
String value1 = "v1";
|
||||
String value2 = "v2";
|
||||
String value3 = "v3";
|
||||
String value4 = "v4";
|
||||
|
||||
TestModelForHds mdl1 = new TestModelForHds(type1, client1, value1);
|
||||
TestModelForHds mdl2 = new TestModelForHds(type2, client2, value2);
|
||||
TestModelForHds mdl3 = new TestModelForHds(type1, client1, value3);
|
||||
TestModelForHds mdl4 = new TestModelForHds(type2, client2, value4);
|
||||
|
||||
int customerId = (int)System.currentTimeMillis();
|
||||
long equipmentId = System.currentTimeMillis();
|
||||
//if time is less than 70 minutes from now, hourly indexes will not be used, so we'll adjust time to be in the past
|
||||
long streamFirstModelStartTimeMs = System.currentTimeMillis() - TimeUnit.HOURS.toMillis(2);
|
||||
|
||||
//align our data files at the beginning of the hour, so that if the test runs at the end of the hour we do not end up with 2 hourly directories
|
||||
streamFirstModelStartTimeMs = streamFirstModelStartTimeMs
|
||||
- streamFirstModelStartTimeMs%TimeUnit.HOURS.toMillis(1)
|
||||
+ ((long)(100*Math.random()));
|
||||
|
||||
//first write a datafile when no indexes are present
|
||||
StreamHolder streamHolder1 = new StreamHolder(streamFirstModelStartTimeMs , customerId, equipmentId, hDs);
|
||||
streamHolder1.writeModelToStream(mdl1);
|
||||
streamHolder1.writeModelToStream(mdl2);
|
||||
streamHolder1.commitOutputStreamToFile();
|
||||
|
||||
String dataFileName = streamHolder1.getFullFileName();
|
||||
String hourlyDirectoryName = dataFileName.substring(0, dataFileName.lastIndexOf('/')+1);
|
||||
HourlyIndexFileNames recordTypeIdxHourlyIndexFileNames = new HourlyIndexFileNames(
|
||||
customerId, equipmentId, streamFirstModelStartTimeMs, streamFirstModelStartTimeMs+ 1000,
|
||||
recordTypeIdx, dsPrefix, fileNamePrefix, numberOfMinutesPerFile);
|
||||
String recordTypeIdxHourlyIdxFileName = recordTypeIdxHourlyIndexFileNames.iterator().next();
|
||||
|
||||
HourlyIndexFileNames clientIdxHourlyIndexFileNames = new HourlyIndexFileNames(
|
||||
customerId, equipmentId, streamFirstModelStartTimeMs, streamFirstModelStartTimeMs+ 1000,
|
||||
clientIdx, dsPrefix, fileNamePrefix, numberOfMinutesPerFile);
|
||||
String clientIdxHourlyIdxFileName = clientIdxHourlyIndexFileNames.iterator().next();
|
||||
|
||||
|
||||
//verify that hourly index is not present
|
||||
Map<String,Long> fileNameToLastModMap = HierarchicalDatastore.getFileNamesAndLastMods(dsRootDirName, hourlyDirectoryName);
|
||||
assertTrue(checkIfS3ObjectExists(streamHolder1.getFullFileName()));
|
||||
assertEquals(1, fileNameToLastModMap.size());
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(dataFileName));
|
||||
|
||||
//Build hourly index - should build none, since no indexes were registered with this datastore.
|
||||
HierarchicalDatastore.rebuildHourlyIndex(hourlyDirectoryName, dsRootDirName, dsPrefix, recordIndexRegistry, hazelcastInstance, hazelcastObjectsConfiguration);
|
||||
|
||||
//verify that hourly index is not present
|
||||
fileNameToLastModMap = HierarchicalDatastore.getFileNamesAndLastMods(dsRootDirName, hourlyDirectoryName);
|
||||
assertEquals(1, fileNameToLastModMap.size());
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(dataFileName));
|
||||
|
||||
//now register an index and write a second datafile, with an index
|
||||
RecordIndexValueExtractor recordTypeIdxValueExtractor = new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
return Collections.singleton(((TestModelForHds)model).getRecordType());
|
||||
}
|
||||
};
|
||||
|
||||
hDs.registerRecordIndex(recordTypeIdx, recordTypeIdxValueExtractor );
|
||||
recordIndexRegistry.getIndexMap(fileNamePrefix).put(recordTypeIdx, recordTypeIdxValueExtractor);
|
||||
recordIndexRegistry.getAllFileNamePrefixes().add(fileNamePrefix);
|
||||
|
||||
StreamHolder streamHolder2 = new StreamHolder(streamFirstModelStartTimeMs + 100 , customerId, equipmentId, hDs);
|
||||
streamHolder2.writeModelToStream(mdl3);
|
||||
streamHolder2.writeModelToStream(mdl4);
|
||||
streamHolder2.commitOutputStreamToFile();
|
||||
|
||||
long streamLastModelTimeMs = streamFirstModelStartTimeMs + 200;
|
||||
|
||||
//verify that 2 files were written to S3 by this time:
|
||||
// one data file before index was registered,
|
||||
// and one data file after index was registered,
|
||||
//index itself is stored in hazelcast
|
||||
List<String> dataFileNames = hDs.getFileNames(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs);
|
||||
assertEquals(2, dataFileNames.size());
|
||||
|
||||
assertTrue(dataFileNames.contains(streamHolder1.getFullFileName()));
|
||||
assertTrue(dataFileNames.contains(streamHolder2.getFullFileName()));
|
||||
|
||||
assertFalse(checkIfHazelcastObjectExists("recIdx-" + hazelcastMapPrefix + recordTypeIdx,
|
||||
HierarchicalDatastore.getIndexFileName(streamHolder1.getFullFileName(), recordTypeIdx)));
|
||||
|
||||
//second index should exist because the index was registered with s3ds at the time of writing the data file
|
||||
assertTrue(checkIfHazelcastObjectExists("recIdx-" + hazelcastMapPrefix + recordTypeIdx,
|
||||
HierarchicalDatastore.getIndexFileName(streamHolder2.getFullFileName(), recordTypeIdx)));
|
||||
|
||||
EntryFilter<TestModelForHds> type1EntryFilter = new EntryFilter<TestModelForHds>() {
|
||||
@Override
|
||||
public TestModelForHds getFilteredEntry(TestModelForHds entry) {
|
||||
if(entry.getClient().equals(client1)){
|
||||
return entry;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
//Build hourly index - should build one hourly index file, since now one index is registered with this datastore.
|
||||
HierarchicalDatastore.rebuildHourlyIndex(hourlyDirectoryName, dsRootDirName, dsPrefix, recordIndexRegistry, hazelcastInstance, hazelcastObjectsConfiguration);
|
||||
|
||||
//verify that hourly index is present
|
||||
fileNameToLastModMap = HierarchicalDatastore.getFileNamesAndLastMods(dsRootDirName, hourlyDirectoryName);
|
||||
assertEquals(3, fileNameToLastModMap.size());
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(streamHolder1.getFullFileName()));
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(streamHolder2.getFullFileName()));
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(recordTypeIdxHourlyIdxFileName));
|
||||
|
||||
//check content of the hourly index file
|
||||
DirectoryIndex recordTypeIdxHourlyIdx = HierarchicalDatastore.getZippedModelFromFile(dsRootDirName,
|
||||
recordTypeIdxHourlyIdxFileName, DirectoryIndex.class);
|
||||
assertNotNull(recordTypeIdxHourlyIdx);
|
||||
assertEquals(recordTypeIdx, recordTypeIdxHourlyIdx.getName());
|
||||
//there should be indexes created for 2 files
|
||||
assertEquals(2, recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().size());
|
||||
//first data file has 2 models, first one with type1, second one with type2
|
||||
assertEquals(2, recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getCounts().getTotalCount());
|
||||
assertEquals(0, (int) recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getPositions().getPositionsForValue(type1).get(0));
|
||||
assertEquals(1, (int) recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getPositions().getPositionsForValue(type2).get(0));
|
||||
//second data file has 2 models, first one with type1, second one with type2
|
||||
assertEquals(2, recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getCounts().getTotalCount());
|
||||
assertEquals(0, (int) recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getPositions().getPositionsForValue(type1).get(0));
|
||||
assertEquals(1, (int) recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getPositions().getPositionsForValue(type2).get(0));
|
||||
|
||||
verifyAllOperations(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, mdl1, mdl2, mdl3, mdl4, recordTypeIdx, type1, type2, type1EntryFilter);
|
||||
|
||||
|
||||
//Test normal index operations when more than one index is present
|
||||
//register a second index and re-create hourly index file
|
||||
RecordIndexValueExtractor clientIdxValueExtractor = new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
return Collections.singleton(((TestModelForHds)model).getClient());
|
||||
}
|
||||
};
|
||||
|
||||
hDs.registerRecordIndex(clientIdx, clientIdxValueExtractor);
|
||||
recordIndexRegistry.getIndexMap(fileNamePrefix).put(clientIdx, clientIdxValueExtractor);
|
||||
|
||||
|
||||
//Build hourly index - should build two hourly index files, since now two indexes are registered with this datastore.
|
||||
HierarchicalDatastore.rebuildHourlyIndex(hourlyDirectoryName, dsRootDirName, dsPrefix, recordIndexRegistry, hazelcastInstance, hazelcastObjectsConfiguration);
|
||||
|
||||
//verify first index file
|
||||
//verify that two hourly indexes are present
|
||||
fileNameToLastModMap = HierarchicalDatastore.getFileNamesAndLastMods(dsRootDirName, hourlyDirectoryName);
|
||||
assertEquals(4, fileNameToLastModMap.size());
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(streamHolder1.getFullFileName()));
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(streamHolder2.getFullFileName()));
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(recordTypeIdxHourlyIdxFileName));
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(clientIdxHourlyIdxFileName));
|
||||
|
||||
//check content of the hourly index file for recordTypeIdx
|
||||
recordTypeIdxHourlyIdx = HierarchicalDatastore.getZippedModelFromFile(dsRootDirName,
|
||||
recordTypeIdxHourlyIdxFileName, DirectoryIndex.class);
|
||||
assertNotNull(recordTypeIdxHourlyIdx);
|
||||
assertEquals(recordTypeIdx, recordTypeIdxHourlyIdx.getName());
|
||||
//there should be indexes created for 2 files
|
||||
assertEquals(2, recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().size());
|
||||
//first data file has 2 models, first one with type1, second one with type2
|
||||
assertEquals(2, recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getCounts().getTotalCount());
|
||||
assertEquals(0, (int) recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getPositions().getPositionsForValue(type1).get(0));
|
||||
assertEquals(1, (int) recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getPositions().getPositionsForValue(type2).get(0));
|
||||
//second data file has 2 models, first one with type1, second one with type2
|
||||
assertEquals(2, recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getCounts().getTotalCount());
|
||||
assertEquals(0, (int) recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getPositions().getPositionsForValue(type1).get(0));
|
||||
assertEquals(1, (int) recordTypeIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getPositions().getPositionsForValue(type2).get(0));
|
||||
|
||||
//check content of the hourly index file for clientIdx
|
||||
DirectoryIndex clientIdxHourlyIdx = HierarchicalDatastore.getZippedModelFromFile(dsRootDirName,
|
||||
clientIdxHourlyIdxFileName, DirectoryIndex.class);
|
||||
assertNotNull(clientIdxHourlyIdx);
|
||||
assertEquals(clientIdx, clientIdxHourlyIdx.getName());
|
||||
//there should be indexes created for 2 files
|
||||
assertEquals(2, clientIdxHourlyIdx.getDataFileNameToRecordIndexMap().size());
|
||||
//first data file has 2 models, first one with client1, second one with client2
|
||||
assertEquals(2, clientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getCounts().getTotalCount());
|
||||
assertEquals(0, (int) clientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getPositions().getPositionsForValue(client1).get(0));
|
||||
assertEquals(1, (int) clientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getPositions().getPositionsForValue(client2).get(0));
|
||||
//second data file has 2 models, first one with client1, second one with client2
|
||||
assertEquals(2, clientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getCounts().getTotalCount());
|
||||
assertEquals(0, (int) clientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getPositions().getPositionsForValue(client1).get(0));
|
||||
assertEquals(1, (int) clientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getPositions().getPositionsForValue(client2).get(0));
|
||||
|
||||
|
||||
//verify that two hourly indexes are present
|
||||
fileNameToLastModMap = HierarchicalDatastore.getFileNamesAndLastMods(dsRootDirName, hourlyDirectoryName);
|
||||
assertEquals(4, fileNameToLastModMap.size());
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(streamHolder1.getFullFileName()));
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(streamHolder2.getFullFileName()));
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(recordTypeIdxHourlyIdxFileName));
|
||||
assertTrue(fileNameToLastModMap.keySet().contains(clientIdxHourlyIdxFileName));
|
||||
|
||||
EntryFilter<TestModelForHds> client1EntryFilter = new EntryFilter<TestModelForHds>() {
|
||||
@Override
|
||||
public TestModelForHds getFilteredEntry(TestModelForHds entry) {
|
||||
if(entry.getClient().equals(client1)){
|
||||
return entry;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
//verify operations with first index
|
||||
verifyAllOperations(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, mdl1, mdl2, mdl3, mdl4, recordTypeIdx, type1, type2, type1EntryFilter);
|
||||
|
||||
//verify operations with second index
|
||||
//we can re-use verification logic because models with type1 values have client1 and models with type2 values have client2
|
||||
verifyAllOperations(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, mdl1, mdl2, mdl3, mdl4, clientIdx, client1, client2, client1EntryFilter);
|
||||
|
||||
//////
|
||||
// now check multi-valued indexes
|
||||
//////
|
||||
|
||||
//register multi-valued index and write a datafile, with an index
|
||||
RecordIndexValueExtractor manyClientIdxValueExtractor = new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
if(((TestModelForHds)model).getManyClients()==null || ((TestModelForHds)model).getManyClients().isEmpty()){
|
||||
return Collections.singleton("");
|
||||
}
|
||||
|
||||
return new HashSet<>(((TestModelForHds)model).getManyClients());
|
||||
}
|
||||
};
|
||||
|
||||
hDs.registerRecordIndex(manyClientIdx, manyClientIdxValueExtractor );
|
||||
recordIndexRegistry.getIndexMap(fileNamePrefix).put(manyClientIdx, manyClientIdxValueExtractor);
|
||||
recordIndexRegistry.getAllFileNamePrefixes().add(fileNamePrefix);
|
||||
|
||||
TestModelForHds mdl5 = new TestModelForHds(type3, client1, value3);
|
||||
mdl5.setManyClients(new HashSet<>( Arrays.asList(new String[]{"mc1","mc2","mc3"})));
|
||||
TestModelForHds mdl6 = new TestModelForHds(type4, client1, value3);
|
||||
mdl6.setManyClients(new HashSet<>( Arrays.asList(new String[]{"mc1","mc4","mc5"})));
|
||||
TestModelForHds mdl7 = new TestModelForHds(type5, client1, value3);
|
||||
mdl7.setManyClients(new HashSet<>( Arrays.asList(new String[]{})));
|
||||
|
||||
|
||||
streamFirstModelStartTimeMs += 120000 + 3000;
|
||||
StreamHolder streamHolder3 = new StreamHolder(streamFirstModelStartTimeMs, customerId, equipmentId, hDs);
|
||||
streamHolder3.writeModelToStream(mdl5);
|
||||
streamHolder3.writeModelToStream(mdl6);
|
||||
streamHolder3.writeModelToStream(mdl7);
|
||||
streamHolder3.commitOutputStreamToFile();
|
||||
|
||||
//update fileCreatedTimestampsForInterval in hazelcast - append new timestamp for just-uploaded-stream to it
|
||||
Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
|
||||
calendar.setTime(new Date(streamHolder3.getZipStreamStartTimeMs()));
|
||||
int year = calendar.get(Calendar.YEAR);
|
||||
int month = calendar.get(Calendar.MONTH) + 1;
|
||||
int day = calendar.get(Calendar.DAY_OF_MONTH);
|
||||
int hour = calendar.get(Calendar.HOUR_OF_DAY);
|
||||
|
||||
hDs.appendFileNameToDirectoryListing(customerId, equipmentId, year, month, day, hour, streamHolder3.getFullFileName());
|
||||
|
||||
streamLastModelTimeMs = streamFirstModelStartTimeMs + 100;
|
||||
|
||||
HierarchicalDatastore.rebuildHourlyIndex(hourlyDirectoryName, dsRootDirName, dsPrefix, recordIndexRegistry, hazelcastInstance, hazelcastObjectsConfiguration);
|
||||
|
||||
//verify that no-index search returns 3 models
|
||||
List<TestModelForHds> entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter , null, null /*no index*/, TestModelForHds.class);
|
||||
assertEquals(3 /*new models*/ , entryList.size());
|
||||
assertTrue(entryList.contains(mdl5));
|
||||
assertTrue(entryList.contains(mdl6));
|
||||
assertTrue(entryList.contains(mdl7));
|
||||
|
||||
//verify that manyClientIdx search for "mc2" and "mc4" returns one model
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter ,
|
||||
manyClientIdx, new HashSet<>(Arrays.asList("mc2")), TestModelForHds.class);
|
||||
assertEquals(1, entryList.size());
|
||||
assertTrue(entryList.contains(mdl5));
|
||||
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter ,
|
||||
manyClientIdx, new HashSet<>(Arrays.asList("mc4")), TestModelForHds.class);
|
||||
assertEquals(1, entryList.size());
|
||||
assertTrue(entryList.contains(mdl6));
|
||||
|
||||
//verify that manyClientIdx search for "mc1" returns two models
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter ,
|
||||
manyClientIdx, new HashSet<>(Arrays.asList("mc1")), TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl5));
|
||||
assertTrue(entryList.contains(mdl6));
|
||||
|
||||
//check content of the hourly index file for manyClientIdx
|
||||
HourlyIndexFileNames manyClientIdxHourlyIndexFileNames = new HourlyIndexFileNames(
|
||||
customerId, equipmentId, streamFirstModelStartTimeMs, streamFirstModelStartTimeMs+ 1000,
|
||||
manyClientIdx, dsPrefix, fileNamePrefix, numberOfMinutesPerFile);
|
||||
String manyClientIdxHourlyIdxFileName = manyClientIdxHourlyIndexFileNames.iterator().next();
|
||||
|
||||
DirectoryIndex manyClientIdxHourlyIdx = HierarchicalDatastore.getZippedModelFromFile(dsRootDirName,
|
||||
manyClientIdxHourlyIdxFileName, DirectoryIndex.class);
|
||||
assertNotNull(manyClientIdxHourlyIdx);
|
||||
assertEquals(manyClientIdx, manyClientIdxHourlyIdx.getName());
|
||||
|
||||
// {"_type":"DirectoryIndex",
|
||||
// "name":"manyClient",
|
||||
// "dataFileNameToRecordIndexMap":{
|
||||
// "testDs/-1926954131/1492721664877/2017/04/20/18/testF_-1926954131_1492721664877_2017_04_20_18_00_1492721664880.zip":
|
||||
// {"_type":"RecordIndex",
|
||||
// "counts":{"_type":"RecordIndexCounts","name":"manyClient","totalCount":2,"perValueCounts":{"":2}},
|
||||
// "positions":{"_type":"RecordIndexPositions","name":"manyClient",
|
||||
// "perValuePositions":{"":[0,1]}}},
|
||||
// "testDs/-1926954131/1492721664877/2017/04/20/18/testF_-1926954131_1492721664877_2017_04_20_18_02_1492721665391.zip":
|
||||
// {"_type":"RecordIndex",
|
||||
// "counts":{"_type":"RecordIndexCounts","name":"manyClient","totalCount":7,"perValueCounts":{"":1,"mc1":2,"mc3":1,"mc2":1,"mc5":1,"mc4":1}},
|
||||
// "positions":{"_type":"RecordIndexPositions","name":"manyClient","perValuePositions":{"":[2],"mc1":[0,1],"mc3":[0],"mc2":[0],"mc5":[1],"mc4":[1]}}},
|
||||
// "testDs/-1926954131/1492721664877/2017/04/20/18/testF_-1926954131_1492721664877_2017_04_20_18_00_1492721665101.zip":
|
||||
// {"_type":"RecordIndex",
|
||||
// "counts":{"_type":"RecordIndexCounts","name":"manyClient","totalCount":2,"perValueCounts":{"":2}},
|
||||
// "positions":{"_type":"RecordIndexPositions","name":"manyClient","perValuePositions":{"":[0,1]}}}}}
|
||||
|
||||
|
||||
//there should be indexes created for 3 files (streamHolder1, 2 and 3)
|
||||
assertEquals(3, manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().size());
|
||||
//data file for streamholder1 has 2 models
|
||||
assertEquals(2, manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getCounts().getTotalCount());
|
||||
assertEquals(0, (int) manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getPositions().getPositionsForValue("").get(0));
|
||||
assertEquals(1, (int) manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder1.getFullFileName()).getPositions().getPositionsForValue("").get(1));
|
||||
|
||||
//data file for streamholder2 has 2 models
|
||||
assertEquals(2, manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getCounts().getTotalCount());
|
||||
assertEquals(0, (int) manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getPositions().getPositionsForValue("").get(0));
|
||||
assertEquals(1, (int) manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder2.getFullFileName()).getPositions().getPositionsForValue("").get(1));
|
||||
|
||||
//data file for streamholder3 has a total index count of 7 across its 3 models (mc1 points at two positions)
|
||||
assertEquals(7, manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder3.getFullFileName()).getCounts().getTotalCount());
|
||||
assertEquals(0, (int) manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder3.getFullFileName()).getPositions().getPositionsForValue("mc1").get(0));
|
||||
assertEquals(1, (int) manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder3.getFullFileName()).getPositions().getPositionsForValue("mc1").get(1));
|
||||
assertEquals(2, (int) manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder3.getFullFileName()).getPositions().getPositionsForValue("").get(0));
|
||||
assertEquals(0, (int) manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder3.getFullFileName()).getPositions().getPositionsForValue("mc2").get(0));
|
||||
assertEquals(0, (int) manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder3.getFullFileName()).getPositions().getPositionsForValue("mc3").get(0));
|
||||
assertEquals(1, (int) manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder3.getFullFileName()).getPositions().getPositionsForValue("mc4").get(0));
|
||||
assertEquals(1, (int) manyClientIdxHourlyIdx.getDataFileNameToRecordIndexMap().get(streamHolder3.getFullFileName()).getPositions().getPositionsForValue("mc5").get(0));
|
||||
|
||||
}
|
||||
|
||||
|
||||
private boolean checkIfHazelcastObjectExists(String mapName, String mapKey) {
|
||||
IMap<String, byte[]> hcMap = hazelcastInstance.getMap(mapName);
|
||||
return hcMap != null && hcMap.containsKey(mapKey);
|
||||
}
|
||||
|
||||
private void deleteHazelcastObject(String mapName, String mapKey) {
|
||||
IMap<String, byte[]> hcMap = hazelcastInstance.getMap(mapName);
|
||||
if(hcMap!=null){
|
||||
hcMap.delete(mapKey);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean checkIfS3ObjectExists(String s3Key) {
|
||||
File f = new File(dsRootDirName, s3Key);
|
||||
return f.exists();
|
||||
}
|
||||
|
||||
private static final EntryFilter<TestModelForHds> matchAllEntryFilter = new EntryFilter<TestModelForHds>() {
|
||||
@Override
|
||||
public TestModelForHds getFilteredEntry(TestModelForHds entry) {
|
||||
return entry;
|
||||
}
|
||||
};
|
||||
|
||||
private void verifyAllOperations(int customerId, long equipmentId,
|
||||
long streamFirstModelStartTimeMs, long streamLastModelTimeMs,
|
||||
TestModelForHds mdl1, TestModelForHds mdl2, TestModelForHds mdl3, TestModelForHds mdl4,
|
||||
String indexName,
|
||||
String idxValue1, String idxValue2, EntryFilter<TestModelForHds> matchType1EntryFilter) {
|
||||
|
||||
|
||||
//test read/count with null index name - results in full scan of the data files
|
||||
List<TestModelForHds> entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter , null, null /*no index*/, TestModelForHds.class);
|
||||
assertEquals(4, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl2));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
assertTrue(entryList.contains(mdl4));
|
||||
|
||||
//test read/count with not-null index name and null set of indexed values - results in full scan of the data files
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter , indexName, null, TestModelForHds.class);
|
||||
assertEquals(4, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl2));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
assertTrue(entryList.contains(mdl4));
|
||||
|
||||
//test read/count with not-null index name and empty set of indexed values - results in full scan of the data files
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter , indexName, Collections.emptySet(), TestModelForHds.class);
|
||||
assertEquals(4, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl2));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
assertTrue(entryList.contains(mdl4));
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with one non-existent value
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList("non-existing-type")), TestModelForHds.class);
|
||||
assertEquals(0, entryList.size());
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with one value - uses index
|
||||
//indexed files will return only matching values - mdl1 and mdl3.
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList(idxValue1)), TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with two values
|
||||
//indexed files will return matching values - mdl1, mdl2, mdl3 and mdl4
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList(idxValue1, idxValue2)), TestModelForHds.class);
|
||||
assertEquals(4, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl2));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
assertTrue(entryList.contains(mdl4));
|
||||
|
||||
|
||||
//
|
||||
// Now repeat above tests with an entry filter that matches only specific records (of type type1/client1)
|
||||
//
|
||||
|
||||
//test read/count with null index name - results in full scan of the data files
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter , null, null /*no index*/, TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
|
||||
//test read/count with not-null index name and null set of indexed values - results in full scan of the data files
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter , indexName, null, TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
|
||||
//test read/count with not-null index name and empty set of indexed values - results in full scan of the data files
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter , indexName, Collections.emptySet(), TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with one non-existent value
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList("non-existing-type")), TestModelForHds.class);
|
||||
assertEquals(0, entryList.size());
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with one value
|
||||
//indexed file will return only matching values - mdl1 and mdl3.
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList(idxValue1)), TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with two values
|
||||
//indexed files will return matching values - mdl1, mdl2, mdl3 and mdl4, entry filter will only pass mdl1 and mdl3.
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList(idxValue1, idxValue2)), TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
}
|
||||
|
||||
|
||||
|
||||
//TODO: Test invalid index operations - they all should regress into full scan of data files
|
||||
//TODO: create N data files in s3ds, 2 records each -
|
||||
// empty index file
|
||||
// index file with no record counts
|
||||
// index file with no record positions
|
||||
//TODO: test read/count with null index name
|
||||
//TODO: test read/count with not-null index name and null set of indexed values
|
||||
//TODO: test read/count with not-null index name and empty set of indexed values
|
||||
//TODO: test read/count with not-null index name and set of indexed values with one value
|
||||
//TODO: test read/count with not-null index name and set of indexed values with two values
|
||||
//TODO: verify situation when index contains data, but data file is missing
|
||||
|
||||
|
||||
|
||||
}
|
||||
@@ -0,0 +1,423 @@
|
||||
package com.telecominfraproject.wlan.hierarchical.datastore;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.boot.test.context.SpringBootTest.WebEnvironment;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.context.support.PropertySourcesPlaceholderConfigurer;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.junit4.SpringRunner;
|
||||
|
||||
import com.google.common.io.Files;
|
||||
import com.hazelcast.core.HazelcastInstance;
|
||||
import com.hazelcast.core.IMap;
|
||||
import com.telecominfraproject.wlan.hazelcast.HazelcastForUnitTest;
|
||||
import com.telecominfraproject.wlan.hazelcast.HazelcastForUnitTest.HazelcastUnitTestManager;
|
||||
import com.telecominfraproject.wlan.hazelcast.common.HazelcastObjectsConfiguration;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.HierarchicalDatastore;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.index.RecordIndexValueExtractor;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.index.registry.RecordIndexRegistry;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.writer.StreamHolder;
|
||||
import com.telecominfraproject.wlan.core.model.filter.EntryFilter;
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
|
||||
/**
|
||||
* @author dtoptygin
|
||||
*
|
||||
*/
|
||||
@RunWith(SpringRunner.class)
|
||||
@SpringBootTest(webEnvironment = WebEnvironment.NONE, classes = HierarchicalDatastoreRecordIndexTests.class)
|
||||
@Import(value = {
|
||||
HazelcastForUnitTest.class,
|
||||
RecordIndexRegistry.class,
|
||||
HazelcastObjectsConfiguration.class,
|
||||
PropertySourcesPlaceholderConfigurer.class, //must have this to resolve non-string @Value annotations, i.e. int properties, etc.
|
||||
})
|
||||
@ActiveProfiles({"HazelcastForUnitTest"})
|
||||
public class HierarchicalDatastoreRecordIndexTests {
|
||||
|
||||
static{
|
||||
System.setProperty("tip.wlan.hdsExecutorQueueSize", "5000");
|
||||
System.setProperty("tip.wlan.hdsExecutorThreads", "10");
|
||||
System.setProperty("tip.wlan.hdsExecutorCoreThreadsFactor", "1");
|
||||
HazelcastUnitTestManager.initializeSystemProperty(HierarchicalDatastoreRecordIndexTests.class);
|
||||
}
|
||||
|
||||
static final HazelcastUnitTestManager testManager = new HazelcastUnitTestManager();
|
||||
|
||||
@Autowired
|
||||
public void setHazelcastInstance(HazelcastInstance hazelcastInstance) {
|
||||
this.hazelcastInstance = hazelcastInstance;
|
||||
testManager.registerInstance(hazelcastInstance);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void shutdown() {
|
||||
testManager.shutdownAllInstances();
|
||||
}
|
||||
|
||||
private static final String dsRootDirName = "hds-test";
|
||||
private static final String dsPrefix = "testDsRIT";
|
||||
|
||||
String fileNamePrefix = "testF";
|
||||
String hazelcastMapPrefix = fileNamePrefix+"-";
|
||||
String hdsCreationTimestampFileMapPrefix = "hdsCreationTs-";
|
||||
|
||||
private HazelcastInstance hazelcastInstance;
|
||||
@Autowired HazelcastObjectsConfiguration hazelcastObjectsConfiguration;
|
||||
|
||||
@Autowired RecordIndexRegistry recordIndexRegistry;
|
||||
|
||||
HierarchicalDatastore hDs;
|
||||
|
||||
ExecutorService executor = Executors.newFixedThreadPool(8, new ThreadFactory(){
|
||||
int cnt;
|
||||
@Override
|
||||
public Thread newThread(Runnable r) {
|
||||
Thread thr = new Thread(r, "UnitTest-HierarchicalDatastoreTests-"+(cnt++));
|
||||
thr.setDaemon(true);
|
||||
return thr;
|
||||
}
|
||||
});
|
||||
|
||||
@AfterClass
|
||||
public static void removeAllHdsFiles(){
|
||||
File rootDir = new File(dsRootDirName + File.separator + dsPrefix);
|
||||
if(rootDir.getAbsolutePath().equals("/")) {
|
||||
throw new IllegalArgumentException("attempting to delete / - please make sure your dsRootDirName and dsPrefix are not empty strings!");
|
||||
}
|
||||
|
||||
for(File f : Files.fileTreeTraverser().postOrderTraversal(rootDir)) {
|
||||
f.delete();
|
||||
}
|
||||
|
||||
rootDir.delete();
|
||||
}
|
||||
|
||||
@PostConstruct
|
||||
void initHds(){
|
||||
//remove previous datastore content, if any
|
||||
removeAllHdsFiles();
|
||||
|
||||
hDs = new HierarchicalDatastore(dsRootDirName, dsPrefix, fileNamePrefix, 1, 20L,
|
||||
hazelcastInstance, hazelcastMapPrefix, hazelcastObjectsConfiguration, recordIndexRegistry);
|
||||
}
|
||||
|
||||
|
||||
private final static String recordTypeIdx = "recordType";
|
||||
private final static String clientIdx = "client";
|
||||
|
||||
@Test
|
||||
public void testNormalOneIndex() throws IOException{
|
||||
|
||||
//create 2 data files in s3ds, 2 records each - one with record index, one without any indexes
|
||||
|
||||
String type1 = "t1";
|
||||
String type2 = "t2";
|
||||
String client1 = "c1";
|
||||
String client2 = "c2";
|
||||
String value1 = "v1";
|
||||
String value2 = "v2";
|
||||
String value3 = "v3";
|
||||
String value4 = "v4";
|
||||
|
||||
TestModelForHds mdl1 = new TestModelForHds(type1, client1, value1);
|
||||
TestModelForHds mdl2 = new TestModelForHds(type2, client2, value2);
|
||||
TestModelForHds mdl3 = new TestModelForHds(type1, client1, value3);
|
||||
TestModelForHds mdl4 = new TestModelForHds(type2, client2, value4);
|
||||
|
||||
int customerId = (int)System.currentTimeMillis();
|
||||
long equipmentId = System.currentTimeMillis();
|
||||
long streamFirstModelStartTimeMs = System.currentTimeMillis();
|
||||
|
||||
//first write a datafile when no indexes are present
|
||||
StreamHolder streamHolder1 = new StreamHolder(streamFirstModelStartTimeMs , customerId, equipmentId, hDs);
|
||||
streamHolder1.writeModelToStream(mdl1);
|
||||
streamHolder1.writeModelToStream(mdl2);
|
||||
streamHolder1.commitOutputStreamToFile();
|
||||
|
||||
//now register an index and write a second datafile, with an index
|
||||
hDs.registerRecordIndex(recordTypeIdx, new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
return Collections.singleton(((TestModelForHds)model).getRecordType());
|
||||
}
|
||||
});
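//from this point on every data file committed through StreamHolder also gets a per-file record
//index for recordTypeIdx, stored in hazelcast in the "recIdx-" + hazelcastMapPrefix + indexName map
//under the key produced by HierarchicalDatastore.getIndexFileName(dataFileName, indexName) -
//the assertions below rely on exactly that naming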
|
||||
|
||||
StreamHolder streamHolder2 = new StreamHolder(streamFirstModelStartTimeMs + 100 , customerId, equipmentId, hDs);
|
||||
streamHolder2.writeModelToStream(mdl3);
|
||||
streamHolder2.writeModelToStream(mdl4);
|
||||
streamHolder2.commitOutputStreamToFile();
|
||||
|
||||
long streamLastModelTimeMs = System.currentTimeMillis();
|
||||
if(streamLastModelTimeMs <= streamFirstModelStartTimeMs){
|
||||
streamLastModelTimeMs = streamFirstModelStartTimeMs + 200;
|
||||
}
|
||||
|
||||
//verify the data files written by this time: one data file without an index, and one data file with an index
|
||||
List<String> dataFileNames = hDs.getFileNames(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs);
|
||||
assertEquals(2, dataFileNames.size());
|
||||
|
||||
assertTrue(dataFileNames.contains(streamHolder1.getFullFileName()));
|
||||
assertTrue(dataFileNames.contains(streamHolder2.getFullFileName()));
|
||||
|
||||
assertFalse(checkIfHazelcastObjectExists("recIdx-" + hazelcastMapPrefix + recordTypeIdx ,
|
||||
HierarchicalDatastore.getIndexFileName(streamHolder1.getFullFileName(), recordTypeIdx)));
|
||||
|
||||
//the index for the second data file should exist because the index was registered with s3ds at the time that data file was written
|
||||
assertTrue(checkIfHazelcastObjectExists("recIdx-"+ hazelcastMapPrefix + recordTypeIdx ,
|
||||
HierarchicalDatastore.getIndexFileName(streamHolder2.getFullFileName(), recordTypeIdx)));
|
||||
|
||||
EntryFilter<TestModelForHds> type1EntryFilter = new EntryFilter<TestModelForHds>() {
|
||||
@Override
|
||||
public TestModelForHds getFilteredEntry(TestModelForHds entry) {
|
||||
if(entry.getClient().equals(client1)){
|
||||
return entry;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
verifyAllOperations(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, mdl1, mdl2, mdl3, mdl4, recordTypeIdx, type1, type2, type1EntryFilter);
|
||||
|
||||
//Test normal index operations when more than one index is present
|
||||
//We will remove the data file that has an index (and its index), register a second index in s3ds so that two are present, create one more data file with 2 index files, and verify all operations again
|
||||
File f = new File(hDs.getDsRootDirName(), streamHolder2.getFullFileName());
|
||||
f.delete();
|
||||
deleteHazelcastObject("recIdx-"+ hazelcastMapPrefix + recordTypeIdx , HierarchicalDatastore.getIndexFileName(streamHolder2.getFullFileName(), recordTypeIdx));
|
||||
assertFalse(checkIfS3ObjectExists(streamHolder2.getFullFileName()));
|
||||
assertFalse(checkIfHazelcastObjectExists("recIdx-" + hazelcastMapPrefix + recordTypeIdx ,
|
||||
HierarchicalDatastore.getIndexFileName(streamHolder2.getFullFileName(), recordTypeIdx)));
|
||||
|
||||
//remove cached filename list from hazelcast
|
||||
String tsMapName = hdsCreationTimestampFileMapPrefix + fileNamePrefix+"-";
|
||||
IMap<String, List<Long>> tsMap = hazelcastInstance.getMap(tsMapName);
|
||||
tsMap.clear();
|
||||
|
||||
IMap<String, Set<String>> dirListMap = hazelcastInstance.getMap(hazelcastObjectsConfiguration.getHdsDirectoryListingsMapName());
|
||||
dirListMap.clear();
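//both caches are cleared so that subsequent getFileNames/getEntries calls rebuild the directory
//listings from the file system and no longer see the deleted streamHolder2 data file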
|
||||
|
||||
|
||||
//now register a second index and write a third datafile, with two indexes
|
||||
hDs.registerRecordIndex(clientIdx, new RecordIndexValueExtractor() {
|
||||
@Override
|
||||
public Set<String> extractValues(BaseJsonModel model) {
|
||||
return Collections.singleton(((TestModelForHds)model).getClient());
|
||||
}
|
||||
});
|
||||
|
||||
StreamHolder streamHolder3 = new StreamHolder(streamFirstModelStartTimeMs + 100 , customerId, equipmentId, hDs);
|
||||
streamHolder3.writeModelToStream(mdl3);
|
||||
streamHolder3.writeModelToStream(mdl4);
|
||||
streamHolder3.commitOutputStreamToFile();
|
||||
|
||||
streamLastModelTimeMs = System.currentTimeMillis();
|
||||
if(streamLastModelTimeMs <= streamFirstModelStartTimeMs){
|
||||
streamLastModelTimeMs = streamFirstModelStartTimeMs + 200;
|
||||
}
|
||||
|
||||
//verify the data files visible at this point: one data file without indexes, and one data file with two indexes
|
||||
dataFileNames = hDs.getFileNames(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs);
|
||||
assertEquals(2, dataFileNames.size());
|
||||
|
||||
assertTrue(dataFileNames.contains(streamHolder1.getFullFileName()));
|
||||
assertTrue(dataFileNames.contains(streamHolder3.getFullFileName()));
|
||||
|
||||
assertFalse(checkIfHazelcastObjectExists("recIdx-"+ hazelcastMapPrefix + recordTypeIdx ,
|
||||
HierarchicalDatastore.getIndexFileName(streamHolder1.getFullFileName(), recordTypeIdx)));
|
||||
|
||||
//both indexes for the third data file should exist because they were registered with s3ds at the time that data file was written
|
||||
assertTrue(checkIfHazelcastObjectExists("recIdx-" + hazelcastMapPrefix + recordTypeIdx ,
|
||||
HierarchicalDatastore.getIndexFileName(streamHolder3.getFullFileName(), recordTypeIdx)));
|
||||
assertTrue(checkIfHazelcastObjectExists("recIdx-"+ hazelcastMapPrefix + clientIdx,
|
||||
HierarchicalDatastore.getIndexFileName(streamHolder3.getFullFileName(), clientIdx)));
|
||||
|
||||
EntryFilter<TestModelForHds> client1EntryFilter = new EntryFilter<TestModelForHds>() {
|
||||
@Override
|
||||
public TestModelForHds getFilteredEntry(TestModelForHds entry) {
|
||||
if(entry.getClient().equals(client1)){
|
||||
return entry;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
//verify operations with first index
|
||||
verifyAllOperations(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, mdl1, mdl2, mdl3, mdl4, recordTypeIdx, type1, type2, type1EntryFilter);
|
||||
|
||||
//verify operations with second index
|
||||
//we can re-use verification logic because models with type1 values have client1 and models with type2 values have client2
|
||||
verifyAllOperations(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, mdl1, mdl2, mdl3, mdl4, clientIdx, client1, client2, client1EntryFilter);
|
||||
|
||||
}
|
||||
|
||||
|
||||
private boolean checkIfHazelcastObjectExists(String mapName, String mapKey) {
|
||||
IMap<String, byte[]> hcMap = hazelcastInstance.getMap(mapName);
|
||||
return hcMap != null && hcMap.containsKey(mapKey);
|
||||
}
|
||||
|
||||
private void deleteHazelcastObject(String mapName, String mapKey) {
|
||||
IMap<String, byte[]> hcMap = hazelcastInstance.getMap(mapName);
|
||||
if(hcMap!=null){
|
||||
hcMap.delete(mapKey);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean checkIfS3ObjectExists(String s3Key) {
|
||||
File f = new File(hDs.getDsRootDirName(), s3Key);
|
||||
return f.exists();
|
||||
}
|
||||
|
||||
|
||||
private void verifyAllOperations(int customerId, long equipmentId,
|
||||
long streamFirstModelStartTimeMs, long streamLastModelTimeMs,
|
||||
TestModelForHds mdl1, TestModelForHds mdl2, TestModelForHds mdl3, TestModelForHds mdl4,
|
||||
String indexName,
|
||||
String idxValue1, String idxValue2, EntryFilter<TestModelForHds> matchType1EntryFilter) {
|
||||
|
||||
EntryFilter<TestModelForHds> matchAllEntryFilter = new EntryFilter<TestModelForHds>() {
|
||||
@Override
|
||||
public TestModelForHds getFilteredEntry(TestModelForHds entry) {
|
||||
return entry;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
//test read/count with null index name - results in full scan of the data files
|
||||
List<TestModelForHds> entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter , null, null /*no index*/, TestModelForHds.class);
|
||||
assertEquals(4, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl2));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
assertTrue(entryList.contains(mdl4));
|
||||
|
||||
//test read/count with not-null index name and null set of indexed values - results in full scan of the data files
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter , indexName, null, TestModelForHds.class);
|
||||
assertEquals(4, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl2));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
assertTrue(entryList.contains(mdl4));
|
||||
|
||||
//test read/count with not-null index name and empty set of indexed values - results in full scan of the data files
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter , indexName, Collections.emptySet(), TestModelForHds.class);
|
||||
assertEquals(4, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl2));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
assertTrue(entryList.contains(mdl4));
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with one non-existent value - results in full scan for the data files that do not have indexes, and uses index when possible
|
||||
//in our case not-indexed file will return all its data - mdl1 and mdl2, and indexed file will be skipped.
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList("non-existing-type")), TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl2));
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with one value - results in full scan for the data files that do not have indexes, and uses index when possible
|
||||
//in our case not-indexed file will return all its data - mdl1 and mdl2, and indexed file will return only one matching value - mdl3.
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList(idxValue1)), TestModelForHds.class);
|
||||
assertEquals(3, entryList.size());
|
||||
//System.err.println(entryList);
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl2));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with two values - results in full scan for the data files that do not have indexes, and uses index when possible
|
||||
//in our case not-indexed file will return all its data - mdl1 and mdl2, and indexed file will return matching values - mdl3 and mdl4.
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchAllEntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList(idxValue1, idxValue2)), TestModelForHds.class);
|
||||
assertEquals(4, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl2));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
assertTrue(entryList.contains(mdl4));
|
||||
|
||||
|
||||
//
|
||||
// Now repeat above tests with an entry filter that matches only specific records (of type type1/client1)
|
||||
//
|
||||
|
||||
//test read/count with null index name - results in full scan of the data files
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter , null, null /*no index*/, TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
|
||||
//test read/count with not-null index name and null set of indexed values - results in full scan of the data files
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter , indexName, null, TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
|
||||
//test read/count with not-null index name and empty set of indexed values - results in full scan of the data files
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter , indexName, Collections.emptySet(), TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with one non-existent value - results in full scan for the data files that do not have indexes, and uses index when possible
|
||||
//in our case not-indexed file will return all its matching data - mdl1, and indexed file will be skipped.
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList("non-existing-type")), TestModelForHds.class);
|
||||
assertEquals(1, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with one value - results in full scan for the data files that do not have indexes, and uses index when possible
|
||||
//in our case not-indexed file will return all its matching data - mdl1, and indexed file will return only one matching value - mdl3.
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList(idxValue1)), TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
|
||||
//test read/count with not-null index name and set of indexed values with two values - results in full scan for the data files that do not have indexes, and uses index when possible
|
||||
//in our case not-indexed file will return all its matching data - mdl1; and indexed file will return matching values - mdl3 and mdl4, entry filter will only pass mdl3.
|
||||
entryList = hDs.getEntries(customerId, equipmentId, streamFirstModelStartTimeMs, streamLastModelTimeMs, matchType1EntryFilter ,
|
||||
indexName, new HashSet<>(Arrays.asList(idxValue1, idxValue2)), TestModelForHds.class);
|
||||
assertEquals(2, entryList.size());
|
||||
assertTrue(entryList.contains(mdl1));
|
||||
assertTrue(entryList.contains(mdl3));
|
||||
}
|
||||
|
||||
|
||||
|
||||
//TODO: Test missing/invalid index operations - they all should regress into full scan of data files
|
||||
//TODO: create N data files in s3ds, 2 records each -
|
||||
// empty index file
|
||||
// index file with no record counts
|
||||
// index file with no record positions
|
||||
//TODO: test read/count with null index name
|
||||
//TODO: test read/count with not-null index name and null set of indexed values
|
||||
//TODO: test read/count with not-null index name and empty set of indexed values
|
||||
//TODO: test read/count with not-null index name and set of indexed values with one value
|
||||
//TODO: test read/count with not-null index name and set of indexed values with two values
|
||||
|
||||
|
||||
|
||||
}
|
||||
@@ -0,0 +1,308 @@
|
||||
package com.telecominfraproject.wlan.hierarchical.datastore;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Calendar;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.TimeZone;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.zip.ZipEntry;
|
||||
import java.util.zip.ZipOutputStream;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.boot.test.context.SpringBootTest.WebEnvironment;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.context.support.PropertySourcesPlaceholderConfigurer;
|
||||
import org.springframework.test.context.ActiveProfiles;
|
||||
import org.springframework.test.context.junit4.SpringRunner;
|
||||
|
||||
import com.google.common.io.Files;
|
||||
import com.hazelcast.core.HazelcastInstance;
|
||||
import com.telecominfraproject.wlan.core.model.equipment.MacAddress;
|
||||
import com.telecominfraproject.wlan.core.model.filter.EntryFilter;
|
||||
import com.telecominfraproject.wlan.hazelcast.HazelcastForUnitTest;
|
||||
import com.telecominfraproject.wlan.hazelcast.HazelcastForUnitTest.HazelcastUnitTestManager;
|
||||
import com.telecominfraproject.wlan.hazelcast.common.HazelcastObjectsConfiguration;
|
||||
import com.telecominfraproject.wlan.hierarchical.datastore.index.registry.RecordIndexRegistry;
|
||||
|
||||
/**
|
||||
* @author dtoptygin
|
||||
*
|
||||
*/
|
||||
@RunWith(SpringRunner.class)
|
||||
@SpringBootTest(webEnvironment = WebEnvironment.NONE, classes = HierarchicalDatastoreTests.class)
|
||||
@Import(value = {
|
||||
PropertySourcesPlaceholderConfigurer.class,
|
||||
HazelcastForUnitTest.class,
|
||||
RecordIndexRegistry.class,
|
||||
HazelcastObjectsConfiguration.class,
|
||||
})
|
||||
@ActiveProfiles({"HazelcastForUnitTest"})
|
||||
public class HierarchicalDatastoreTests {
|
||||
|
||||
static{
|
||||
System.setProperty("tip.wlan.hdsExecutorQueueSize", "5000");
|
||||
System.setProperty("tip.wlan.hdsExecutorThreads", "10");
|
||||
System.setProperty("tip.wlan.hdsExecutorCoreThreadsFactor", "1");
|
||||
HazelcastUnitTestManager.initializeSystemProperty(HierarchicalDatastoreTests.class);
|
||||
}
|
||||
|
||||
static final HazelcastUnitTestManager testManager = new HazelcastUnitTestManager();
|
||||
|
||||
@Autowired
|
||||
public void setHazelcastInstance(HazelcastInstance hazelcastInstance) {
|
||||
this.hazelcastInstance = hazelcastInstance;
|
||||
testManager.registerInstance(hazelcastInstance);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void shutdown() {
|
||||
testManager.shutdownAllInstances();
|
||||
}
|
||||
|
||||
private static final String dsRootDirName = "hds-test";
|
||||
private static final String dsPrefix = "testDs";
|
||||
String fileNamePrefix = "testF";
|
||||
String hazelcastMapPrefix = fileNamePrefix+"-";
|
||||
|
||||
private HazelcastInstance hazelcastInstance;
|
||||
@Autowired RecordIndexRegistry recordIndexRegistry;
|
||||
@Autowired HazelcastObjectsConfiguration hazelcastObjectsConfiguration;
|
||||
|
||||
//test with 1 minute per file in s3
|
||||
//TODO: test with 5, 15, 30, 60 (1hr), 240 (4 hrs), 1440 (24 hrs) - make sure tiered tables work with this
|
||||
HierarchicalDatastore hDs;
|
||||
|
||||
ExecutorService executor = Executors.newFixedThreadPool(8, new ThreadFactory(){
|
||||
int cnt;
|
||||
@Override
|
||||
public Thread newThread(Runnable r) {
|
||||
Thread thr = new Thread(r, "UnitTest-HierarchicalDatastoreTests-"+(cnt++));
|
||||
thr.setDaemon(true);
|
||||
return thr;
|
||||
}
|
||||
});
|
||||
|
||||
@PostConstruct
|
||||
void initHds(){
|
||||
//remove previous datastore content, if any
|
||||
removeAllHdsFiles();
|
||||
|
||||
hDs = new HierarchicalDatastore(dsRootDirName, dsPrefix, fileNamePrefix, 1, 20L,
|
||||
hazelcastInstance, hazelcastMapPrefix, hazelcastObjectsConfiguration, recordIndexRegistry);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void removeAllHdsFiles(){
|
||||
File rootDir = new File(dsRootDirName + File.separator + dsPrefix);
|
||||
if(rootDir.getAbsolutePath().equals("/")) {
|
||||
throw new IllegalArgumentException("attempting to delete / - please make sure your dsRootDirName and dsPrefix are not empty strings!");
|
||||
}
|
||||
|
||||
for(File f : Files.fileTreeTraverser().postOrderTraversal(rootDir)) {
|
||||
f.delete();
|
||||
}
|
||||
|
||||
rootDir.delete();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetFileNames() throws IOException, InterruptedException {
|
||||
|
||||
Calendar fromC = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
|
||||
Calendar toC_8hrs = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
|
||||
Calendar toC_15min = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
|
||||
Calendar toC_1day = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
|
||||
Calendar toC_1month = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
|
||||
|
||||
//init calendars
|
||||
fromC.set( 2015, 10, 9, 7, 4);
|
||||
toC_8hrs.set( 2015, 10, 9, 15, 4);
|
||||
toC_15min.set( 2015, 10, 9, 7, 19);
|
||||
toC_1day.set( 2015, 10, 10, 7, 4);
|
||||
toC_1month.set( 2015, 11, 9, 7, 4);
|
||||
|
||||
int customerId = 42;
|
||||
long equipmentId = 314;
|
||||
|
||||
//pre-create files in hDs for the range under test (one day by default - see the loop below); without this getFileNames has nothing to work with
|
||||
Calendar cal = (Calendar) fromC.clone();
|
||||
int numItems = 5; //number of items to create in each zipped file entry
|
||||
List<Future<Boolean>> futures = new ArrayList<>();
|
||||
AtomicInteger numFilesCreated = new AtomicInteger();
|
||||
|
||||
//we'll use the same content in all the files - this test only cares about file names, not file contents
|
||||
String fileNameFirstFile = hDs.getFileNameForNewFile(customerId, equipmentId, cal.getTimeInMillis());
|
||||
byte[] zippedBytes;
|
||||
try {
|
||||
zippedBytes = createZippedFileBytes(fileNameFirstFile.substring(fileNameFirstFile.lastIndexOf('/')+1, fileNameFirstFile.length()-4), numItems);
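//the substring strips the directory part and the trailing ".zip" extension (4 characters), leaving
//just the entry name that createZippedFileBytes expects for the single entry inside the zip file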
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
//use this for month-long range, which makes the test run for 129 sec
|
||||
//while(cal.before(toC_1month) || cal.equals(toC_1month)){
|
||||
|
||||
//use 1 day range for the tests
|
||||
while(cal.before(toC_1day) || cal.equals(toC_1day)){
|
||||
final long calTime = cal.getTimeInMillis() + 15; //distort nicely aligned timestamps, to see that they are normalized back
|
||||
String fileName = hDs.getFileNameForNewFile(customerId, equipmentId, calTime);
|
||||
|
||||
futures.add(executor.submit(new Callable<Boolean>(){
|
||||
@Override
|
||||
public Boolean call() {
|
||||
InputStream inputStream = new ByteArrayInputStream(zippedBytes);
|
||||
hDs.uploadStreamToFileOverwriteOld(inputStream, zippedBytes.length, fileName);
|
||||
|
||||
numFilesCreated.incrementAndGet();
|
||||
return true;
|
||||
}
|
||||
}));
|
||||
cal.add(Calendar.MINUTE, hDs.getNumberOfMinutesPerFile());
|
||||
}
|
||||
|
||||
//wait until all files are created in S3
|
||||
for(Future<Boolean> f: futures){
|
||||
try {
|
||||
f.get();
|
||||
} catch (InterruptedException e) {
|
||||
throw new RuntimeException(e);
|
||||
} catch (ExecutionException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
Thread.sleep(100);
|
||||
|
||||
//Now tests can begin
|
||||
|
||||
//frequent case - 15 minutes worth of data
|
||||
List<String> fileNames = hDs.getFileNames(42, 314, fromC.getTimeInMillis(), toC_15min.getTimeInMillis()); //built 16 files, took 84 ms
|
||||
assertEquals(16, fileNames.size());
|
||||
Collections.sort(fileNames);
|
||||
assertEquals("testDs/42/314/2015/11/09/07/testF_42_314_2015_11_09_07_04", truncateCreatedTimestamp(fileNames.get(0)));
|
||||
assertEquals("testDs/42/314/2015/11/09/07/testF_42_314_2015_11_09_07_19", truncateCreatedTimestamp(fileNames.get(fileNames.size()-1)));
|
||||
|
||||
//regular case - 8 hrs worth of data: from < to time
|
||||
fileNames = hDs.getFileNames(42, 314, fromC.getTimeInMillis(), toC_8hrs.getTimeInMillis()); //built 481 files, took 131 ms
|
||||
assertEquals(481, fileNames.size());
|
||||
Collections.sort(fileNames);
|
||||
assertEquals("testDs/42/314/2015/11/09/07/testF_42_314_2015_11_09_07_04", truncateCreatedTimestamp(fileNames.get(0)));
|
||||
assertEquals("testDs/42/314/2015/11/09/15/testF_42_314_2015_11_09_15_04", truncateCreatedTimestamp(fileNames.get(fileNames.size()-1)));
|
||||
|
||||
//ok case - a day worth of data
|
||||
fileNames = hDs.getFileNames(42, 314, fromC.getTimeInMillis(), toC_1day.getTimeInMillis()); //built 1441 files, took 271 ms
|
||||
assertEquals(1441, fileNames.size());
|
||||
Collections.sort(fileNames);
|
||||
assertEquals("testDs/42/314/2015/11/09/07/testF_42_314_2015_11_09_07_04", truncateCreatedTimestamp(fileNames.get(0)));
|
||||
assertEquals("testDs/42/314/2015/11/10/07/testF_42_314_2015_11_10_07_04", truncateCreatedTimestamp(fileNames.get(fileNames.size()-1)));
|
||||
|
||||
// //tight case - a month worth of data <- should probably draw a line here
|
||||
// fileNames = hDs.getFileNames(42, 314, fromC.getTimeInMillis(), toC_1month.getTimeInMillis()); //built 43201 files, took 69443 ms
|
||||
// assertEquals(43201, fileNames.size());
|
||||
// assertEquals("testDs/42/314/2015/11/09/07/testF_42_314_2015_11_09_07_04", truncateCreatedTimestamp(fileNames.get(0)));
|
||||
// assertEquals("testDs/42/314/2015/12/09/07/testF_42_314_2015_12_09_07_04", truncateCreatedTimestamp(fileNames.get(fileNames.size()-1)));
|
||||
//
|
||||
// //repeat the call, now it should completely come from hazelcast, because the data should have been cached
|
||||
// fileNames = hDs.getFileNames(42, 314, fromC.getTimeInMillis(), toC_1month.getTimeInMillis()); //built 43201 files, took 875 ms
|
||||
// assertEquals(43201, fileNames.size());
|
||||
|
||||
// //extreme case - a year worth of data
|
||||
// toC.set(2016, 10, 9, 7, 4);
|
||||
// fileNames = hDs.getFileNames(42, 314, fromC.getTimeInMillis(), toC.getTimeInMillis());
|
||||
// assertEquals(527041, fileNames.size());
|
||||
// assertEquals("testDs/42/314/2015/11/09/07/testF_42_314_2015_11_09_07_04", truncateCreatedTimestamp(fileNames.get(0)));
|
||||
// assertEquals("testDs/42/314/2016/11/09/07/testF_42_314_2016_11_09_07_04", truncateCreatedTimestamp(fileNames.get(fileNames.size()-1)));
|
||||
|
||||
//same from and to times should result in 1 file name
|
||||
fileNames = hDs.getFileNames(42, 314, fromC.getTimeInMillis(), fromC.getTimeInMillis()); //built 1 files, took 0 ms
|
||||
assertEquals(1, fileNames.size());
|
||||
assertEquals("testDs/42/314/2015/11/09/07/testF_42_314_2015_11_09_07_04", truncateCreatedTimestamp(fileNames.get(0)));
|
||||
|
||||
//from > to time should result in empty file names list
|
||||
fileNames = hDs.getFileNames(42, 314, fromC.getTimeInMillis() + 1, fromC.getTimeInMillis()); //built 0 files
|
||||
assertEquals(0, fileNames.size());
|
||||
|
||||
}
|
||||
|
||||
private String truncateCreatedTimestamp(String fileName){
|
||||
return fileName.substring(0, fileName.lastIndexOf('_'));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testReadFile() throws Exception {
|
||||
//exercises the static HierarchicalDatastore.getContent(inputStream, entryFilter, ...) helper - see the call below
|
||||
|
||||
//create test stream of zipped MacAddress json objects, one per line
|
||||
int numItems = 10;
|
||||
|
||||
//now test the getContent() method on that created stream
|
||||
List<MacAddress> result = HierarchicalDatastore.getContent(
|
||||
new ByteArrayInputStream(createZippedFileBytes("testF_42_314_2015_10_09_07_04", numItems)),
|
||||
new EntryFilter<MacAddress>(){
|
||||
@Override
|
||||
public MacAddress getFilteredEntry(MacAddress r) {
|
||||
return r;
|
||||
}
|
||||
|
||||
}, null /*do full scan*/, MacAddress.class);
|
||||
|
||||
assertEquals(numItems, result.size());
|
||||
assertEquals(0L, (long) result.get(0).getAddressAsLong());
|
||||
assertEquals((long) (numItems - 1), (long) result.get(result.size() - 1).getAddressAsLong());
|
||||
}
|
||||
|
||||
/**
|
||||
* @param partFileName - file name for the entry within the zipped file, usually the same as the name of the zip file without the .zip extension
|
||||
* @param numItems - number of JSON models to create in the zipped file entry
|
||||
* @return bytes representing the zipped file contents, e.g. to be wrapped as new ByteArrayInputStream(createZippedFileBytes("testF_42_314_2015_10_09_07_04", numItems))
|
||||
* @throws IOException
|
||||
*/
|
||||
private byte[] createZippedFileBytes(String partFileName, int numItems) throws IOException{
|
||||
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
|
||||
ZipOutputStream zos = new ZipOutputStream(outputStream);
|
||||
ZipEntry entry = new ZipEntry(partFileName);
|
||||
zos.putNextEntry(entry);
|
||||
|
||||
MacAddress macAddress;
|
||||
|
||||
for(int i =0; i<numItems; i++){
|
||||
macAddress = new MacAddress((long)i);
|
||||
|
||||
byte[] itemBytes = macAddress.toString().getBytes(StandardCharsets.UTF_8);
|
||||
zos.write(itemBytes);
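//terminate each record with CR (13) and LF (10) so the zip entry holds one JSON object per line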
|
||||
zos.write(13);
|
||||
zos.write(10);
|
||||
}
|
||||
|
||||
zos.closeEntry();
|
||||
zos.flush();
|
||||
zos.close();
|
||||
|
||||
outputStream.flush();
|
||||
outputStream.close();
|
||||
|
||||
return outputStream.toByteArray();
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,97 @@
|
||||
package com.telecominfraproject.wlan.hierarchical.datastore;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
|
||||
/**
|
||||
* This model is used in generic Hierarchical DataStore unit tests.
|
||||
* @author dtop
|
||||
*
|
||||
*/
|
||||
class TestModelForHds extends BaseJsonModel{
|
||||
private static final long serialVersionUID = -4915028451879087322L;
|
||||
|
||||
private String recordType;
|
||||
private String client;
|
||||
private String value;
|
||||
private Set<String> manyClients;
|
||||
|
||||
public TestModelForHds() {
|
||||
}
|
||||
|
||||
public TestModelForHds(String recordType, String client, String value) {
|
||||
this.recordType = recordType;
|
||||
this.client = client;
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
public String getValue() {
|
||||
return value;
|
||||
}
|
||||
public void setValue(String value) {
|
||||
this.value = value;
|
||||
}
|
||||
public String getRecordType() {
|
||||
return recordType;
|
||||
}
|
||||
public void setRecordType(String recordType) {
|
||||
this.recordType = recordType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
final int prime = 31;
|
||||
int result = 1;
|
||||
result = prime * result + ((recordType == null) ? 0 : recordType.hashCode());
|
||||
result = prime * result + ((value == null) ? 0 : value.hashCode());
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (this == obj) {
|
||||
return true;
|
||||
}
|
||||
if (obj == null) {
|
||||
return false;
|
||||
}
|
||||
if (!(obj instanceof TestModelForHds)) {
|
||||
return false;
|
||||
}
|
||||
TestModelForHds other = (TestModelForHds) obj;
|
||||
if (recordType == null) {
|
||||
if (other.recordType != null) {
|
||||
return false;
|
||||
}
|
||||
} else if (!recordType.equals(other.recordType)) {
|
||||
return false;
|
||||
}
|
||||
if (value == null) {
|
||||
if (other.value != null) {
|
||||
return false;
|
||||
}
|
||||
} else if (!value.equals(other.value)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
public String getClient() {
|
||||
return client;
|
||||
}
|
||||
|
||||
public void setClient(String client) {
|
||||
this.client = client;
|
||||
}
|
||||
|
||||
public Set<String> getManyClients() {
|
||||
return manyClients;
|
||||
}
|
||||
|
||||
public void setManyClients(Set<String> manyClients) {
|
||||
this.manyClients = manyClients;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
57
base-hierarchical-datastore/src/test/resources/logback.xml
Normal file
@@ -0,0 +1,57 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<!-- For assistance related to logback-translator or configuration -->
|
||||
<!-- files in general, please contact the logback user mailing list -->
|
||||
<!-- at http://www.qos.ch/mailman/listinfo/logback-user -->
|
||||
<!-- -->
|
||||
<!-- For professional support please see -->
|
||||
<!-- http://www.qos.ch/shop/products/professionalSupport -->
|
||||
<!-- -->
|
||||
<configuration>
|
||||
<appender name="stdout" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<!--
|
||||
<appender name="FILE" class="ch.qos.logback.core.FileAppender">
|
||||
<file>myApp.log</file>
|
||||
|
||||
<encoder>
|
||||
<pattern>%date %level [%thread] %logger{10} [%file:%line] %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
-->
|
||||
|
||||
|
||||
<!--
|
||||
details: http://logback.qos.ch/manual/configuration.html#auto_configuration
|
||||
|
||||
runtime configuration, if need to override the defaults:
|
||||
-Dlogback.configurationFile=/path/to/logback.xml
|
||||
|
||||
for log configuration debugging - use
|
||||
-Dlogback.statusListenerClass=ch.qos.logback.core.status.OnConsoleStatusListener
|
||||
|
||||
log levels:
|
||||
OFF ERROR WARN INFO DEBUG TRACE
|
||||
-->
|
||||
<logger name="org.springframework" level="WARN"/>
|
||||
<logger name="org.reflections" level="WARN"/>
|
||||
<logger name="org.apache.catalina.startup.DigesterFactory" level="ERROR"/>
|
||||
<logger name="org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerMapping" level="${art2waveLogLevel:-INFO}"/>
|
||||
<logger name="org.springframework.boot.context.embedded.tomcat.TomcatEmbeddedServletContainer" level="${art2waveLogLevel:-INFO}"/>
|
||||
|
||||
<logger name="com.whizcontrol" level="${art2waveLogLevel:-DEBUG}"/>
|
||||
<logger name="com.netflix.servo.tag.aws.AwsInjectableTag" level="OFF"/>
|
||||
|
||||
<!--
|
||||
<logger name="org.springframework.security.web.authentication.preauth" level="DEBUG"/>
|
||||
-->
|
||||
|
||||
<root level="WARN">
|
||||
<appender-ref ref="stdout"/>
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
46
base-jdbc-tests/pom.xml
Normal file
@@ -0,0 +1,46 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>com.telecominfraproject.wlan</groupId>
    <artifactId>tip-wlan-cloud-root-pom</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <relativePath>../../tip-wlan-cloud-root</relativePath>
  </parent>
  <artifactId>base-jdbc-tests</artifactId>
  <name>base-jdbc-tests</name>
  <description>Common classes used by JDBC unit tests.</description>

  <dependencies>
    <!-- JDBC Data Access Library (depends on spring-core, spring-beans, spring-context,
      spring-tx) Define this if you use Spring's JdbcTemplate API (org.springframework.jdbc.*) -->
    <dependency>
      <groupId>com.telecominfraproject.wlan</groupId>
      <artifactId>base-jdbc</artifactId>
      <version>${tip-wlan-cloud.release.version}</version>
    </dependency>

    <dependency>
      <groupId>org.hsqldb</groupId>
      <artifactId>hsqldb</artifactId>
    </dependency>

    <dependency>
      <groupId>org.springframework.boot</groupId>
      <artifactId>spring-boot-starter-test</artifactId>
    </dependency>

    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
    </dependency>

    <!-- Pojo generator -->
    <dependency>
      <groupId>uk.co.jemos.podam</groupId>
      <artifactId>podam</artifactId>
      <scope>test</scope>
    </dependency>

  </dependencies>
</project>
@@ -0,0 +1,138 @@
package com.telecominfraproject.wlan.core.server.jdbc.test;

import java.util.Set;
import java.util.regex.Pattern;

import javax.sql.DataSource;

import org.junit.runner.RunWith;
import org.reflections.Reflections;
import org.reflections.scanners.ResourcesScanner;
import org.reflections.util.ClasspathHelper;
import org.reflections.util.ConfigurationBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.context.SpringBootTest.WebEnvironment;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.context.annotation.Profile;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.jdbc.datasource.embedded.EmbeddedDatabase;
import org.springframework.jdbc.datasource.embedded.EmbeddedDatabaseBuilder;
import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.context.transaction.AfterTransaction;
import org.springframework.test.context.transaction.BeforeTransaction;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.CollectionUtils;

import com.telecominfraproject.wlan.core.server.jdbc.BaseJDbcDataSource;
import com.telecominfraproject.wlan.core.server.jdbc.BaseKeyColumnConverter;
import com.telecominfraproject.wlan.core.server.jdbc.KeyColumnUpperCaseConverter;

/**
 * Base class for JDBC DAO tests. Note that all tests will be run within a
 * transaction (one tx per test method), and all the db changes <b>will be
 * rolled back</b> at the end of the transaction.
 *
 * <p>
 * When executing transactional tests, it is sometimes useful to be able to
 * execute certain <em>set up</em> or <em>tear down</em> code outside of a
 * transaction. This can be achieved by annotating methods with
 * {@link BeforeTransaction @BeforeTransaction} and
 * {@link AfterTransaction @AfterTransaction}.
 *
 * <pre>
 * <code>
 * @Import(value = { TestConfiguration.class })
 * @TestWithEmbeddedDB
 * </code>
 * </pre>
 *
 * @author dtop
 * @author yongli
 *
 */
@RunWith(SpringRunner.class)
@SpringBootTest(webEnvironment = WebEnvironment.NONE, classes = BaseJdbcTest.Config.class)
@Rollback(value = true)
@Transactional
public abstract class BaseJdbcTest {

    private static final Logger LOG = LoggerFactory.getLogger(BaseJdbcTest.class);

    @Autowired(required = false)
    protected EmbeddedDatabase db;

    public static class BaseJdbcTestDatabase extends BaseJDbcDataSource implements EmbeddedDatabase {

        public BaseJdbcTestDatabase(EmbeddedDatabase targetDataSource, BaseKeyColumnConverter targetConverter) {
            super(targetDataSource, targetConverter);
        }

        @Override
        public void shutdown() {
            EmbeddedDatabase db = (EmbeddedDatabase) getTargetDataSource();
            if (db != null) {
                db.shutdown();
            }
        }
    }

    @Configuration
    // @PropertySource({ "classpath:persistence-${envTarget:dev}.properties" })
    public static class Config {
        // Put all required @Bean -s in here - they will be injected into the
        // AppIntegrationTest class

        @Bean
        @Profile("use_embedded_db")
        @Primary
        EmbeddedDatabase getEmbeddedDatabase() {
            // creates a HSQL in-memory db populated from scripts
            // classpath:schema-hsqldb-test.sql and classpath:test-data.sql
            // this will auto-wire DataSource object
            EmbeddedDatabaseBuilder builder = new EmbeddedDatabaseBuilder()
                    .addScript("classpath:schema-hsqldb-test.sql");

            builder.generateUniqueName(true);

            //
            // We only want to add the test-data.sql if the file actually
            // exists.
            //
            Reflections reflections = new Reflections(new ConfigurationBuilder()
                    .setUrls(ClasspathHelper.forPackage("com.whizcontrol")).setScanners(new ResourcesScanner()));
            Set<String> testDataFiles = reflections.getResources(Pattern.compile("test-data.sql"));

            if (!CollectionUtils.isEmpty(testDataFiles)) {
                builder.addScript("classpath:test-data.sql");
            }

            EmbeddedDatabase db = builder.build();
            return new BaseJdbcTestDatabase(db, new KeyColumnUpperCaseConverter());
        }

        @Bean
        @Primary
        public PlatformTransactionManager transactionManager(DataSource dataSource) {
            return new DataSourceTransactionManager(dataSource);
        }

    }

    @BeforeTransaction
    public void beforeTx() {
        LOG.debug("*** before Tx");
    }

    @AfterTransaction
    public void afterTx() {
        LOG.debug("*** after Tx");
    }

}
@@ -0,0 +1,24 @@
package com.telecominfraproject.wlan.core.server.jdbc.test;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

import org.springframework.test.context.ActiveProfiles;

/**
 * <b>Be careful: This annotation will be overwritten by any
 * other @ActiveProfiles annotations.</b>
 *
 * @author dtop
 *
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@ActiveProfiles(profiles = {
        // test against embedded database
        "use_embedded_db", "use_single_ds" })
public @interface TestWithEmbeddedDB {

}
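For reference, a concrete DAO test built on these two classes might look like the minimal sketch below. CustomerDAO, its methods, and the customer table are hypothetical placeholders for a real module's classes; the pattern of extending BaseJdbcTest and annotating with @TestWithEmbeddedDB is what this commit provides.

package com.telecominfraproject.wlan.core.server.jdbc.test.example;

import static org.junit.Assert.assertEquals;

import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Import;

import com.telecominfraproject.wlan.core.server.jdbc.test.BaseJdbcTest;
import com.telecominfraproject.wlan.core.server.jdbc.test.TestWithEmbeddedDB;

// Hypothetical DAO test: the embedded HSQL db is created from schema-hsqldb-test.sql,
// each @Test runs in its own transaction, and all changes are rolled back afterwards.
@Import(value = { CustomerDAO.class })
@TestWithEmbeddedDB
public class CustomerDAOTest extends BaseJdbcTest {

    @Autowired
    private CustomerDAO customerDAO;

    @Test
    public void testCreateAndGet() {
        long id = customerDAO.create("test-customer");
        assertEquals("test-customer", customerDAO.getName(id));
    }
}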
43
base-jdbc/pom.xml
Normal file
@@ -0,0 +1,43 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>com.telecominfraproject.wlan</groupId>
    <artifactId>tip-wlan-cloud-root-pom</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <relativePath>../../tip-wlan-cloud-root</relativePath>
  </parent>
  <artifactId>base-jdbc</artifactId>
  <name>base-jdbc</name>
  <description>Common classes used by jdbc data sources and connection pools.</description>

  <dependencies>
    <!-- JDBC Data Access Library
      (depends on spring-core, spring-beans, spring-context, spring-tx)
      Define this if you use Spring's JdbcTemplate API
      (org.springframework.jdbc.*)-->
    <dependency>
      <groupId>org.springframework</groupId>
      <artifactId>spring-jdbc</artifactId>
    </dependency>
    <!-- Transaction Management Abstraction
      (depends on spring-core, spring-beans, spring-aop, spring-context)
      Define this if you use Spring Transactions or DAO Exception Hierarchy
      (org.springframework.transaction.*/org.springframework.dao.*)-->
    <dependency>
      <groupId>org.springframework</groupId>
      <artifactId>spring-tx</artifactId>
    </dependency>

    <dependency>
      <groupId>org.apache.commons</groupId>
      <artifactId>commons-dbcp2</artifactId>
    </dependency>

    <dependency>
      <groupId>com.telecominfraproject.wlan</groupId>
      <artifactId>cloud-metrics</artifactId>
      <version>${tip-wlan-cloud.release.version}</version>
    </dependency>

  </dependencies>
</project>
@@ -0,0 +1,239 @@
|
||||
package com.telecominfraproject.wlan.core.server.jdbc;
|
||||
|
||||
import java.io.PrintWriter;
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.lang.reflect.InvocationTargetException;
|
||||
import java.sql.Connection;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.SQLFeatureNotSupportedException;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import javax.sql.DataSource;
|
||||
|
||||
import org.apache.commons.codec.digest.DigestUtils;
|
||||
import org.apache.commons.dbcp2.cpdsadapter.DriverAdapterCPDS;
|
||||
import org.apache.commons.dbcp2.datasources.SharedPoolDataSource;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.core.env.Environment;
|
||||
|
||||
import com.netflix.servo.DefaultMonitorRegistry;
|
||||
import com.netflix.servo.annotations.DataSourceType;
|
||||
import com.netflix.servo.annotations.Monitor;
|
||||
import com.netflix.servo.monitor.BasicGauge;
|
||||
import com.netflix.servo.monitor.MonitorConfig;
|
||||
import com.netflix.servo.monitor.Monitors;
|
||||
import com.netflix.servo.tag.TagList;
|
||||
import com.telecominfraproject.wlan.cloudmetrics.CloudMetricsTags;
|
||||
import com.telecominfraproject.wlan.server.exceptions.ConfigurationException;
|
||||
|
||||
public abstract class BaseDataSourceConfig {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(BaseDataSourceConfig.class);
|
||||
|
||||
private final TagList tags = CloudMetricsTags.commonTags;
|
||||
|
||||
@Autowired
|
||||
private Environment environment;
|
||||
|
||||
@Monitor(name = "getConnection", type = DataSourceType.COUNTER)
|
||||
private final AtomicInteger getConnectionExecuted = new AtomicInteger(0);
|
||||
|
||||
static interface DataSourceInSpringClassloaderInterface extends DataSource {
|
||||
}
|
||||
|
||||
class DataSourceInSpringClassloader implements DataSourceInSpringClassloaderInterface {
|
||||
DataSource dataSource;
|
||||
String id;
|
||||
|
||||
public DataSourceInSpringClassloader(String datasourceId, DataSource dataSource) {
|
||||
this.dataSource = dataSource;
|
||||
this.id = datasourceId;
|
||||
}
|
||||
|
||||
public PrintWriter getLogWriter() throws SQLException {
|
||||
return dataSource.getLogWriter();
|
||||
}
|
||||
|
||||
public <T> T unwrap(Class<T> iface) throws SQLException {
|
||||
return dataSource.unwrap(iface);
|
||||
}
|
||||
|
||||
public void setLogWriter(PrintWriter out) throws SQLException {
|
||||
dataSource.setLogWriter(out);
|
||||
}
|
||||
|
||||
public boolean isWrapperFor(Class<?> iface) throws SQLException {
|
||||
return dataSource.isWrapperFor(iface);
|
||||
}
|
||||
|
||||
public Connection getConnection() throws SQLException {
|
||||
getConnectionExecuted.incrementAndGet();
|
||||
return dataSource.getConnection();
|
||||
}
|
||||
|
||||
public void setLoginTimeout(int seconds) throws SQLException {
|
||||
dataSource.setLoginTimeout(seconds);
|
||||
}
|
||||
|
||||
public Connection getConnection(String username, String password) throws SQLException {
|
||||
getConnectionExecuted.incrementAndGet();
|
||||
return dataSource.getConnection(username, password);
|
||||
}
|
||||
|
||||
public int getLoginTimeout() throws SQLException {
|
||||
return dataSource.getLoginTimeout();
|
||||
}
|
||||
|
||||
public java.util.logging.Logger getParentLogger() throws SQLFeatureNotSupportedException {
|
||||
return dataSource.getParentLogger();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return id;
|
||||
}
|
||||
}
|
||||
|
||||
public DataSource getDataSource() {
|
||||
Properties dataSourceProperties = getDataSourceProperties();
|
||||
DriverAdapterCPDS driverAdapterCPDS = new DriverAdapterCPDS();
|
||||
driverAdapterCPDS.setUrl(dataSourceProperties.getProperty("url"));
|
||||
driverAdapterCPDS.setUser(dataSourceProperties.getProperty("username"));
|
||||
driverAdapterCPDS.setPassword(dataSourceProperties.getProperty("password"));
|
||||
|
||||
try {
|
||||
driverAdapterCPDS.setDriver(dataSourceProperties.getProperty("driverClass"));
|
||||
} catch (Exception e) {
|
||||
throw new ConfigurationException("Failed to set driver for data source", e);
|
||||
}
|
||||
|
||||
driverAdapterCPDS
|
||||
.setMaxPreparedStatements(Integer.valueOf(dataSourceProperties.getProperty("maxPreparedStatements")));
|
||||
driverAdapterCPDS.setMaxIdle(Integer.valueOf(dataSourceProperties.getProperty("maxIdlePreparedStatements")));
|
||||
driverAdapterCPDS
|
||||
.setPoolPreparedStatements(Boolean.valueOf(dataSourceProperties.getProperty("poolPreparedStatements")));
|
||||
|
||||
final SharedPoolDataSource poolDataSource = new SharedPoolDataSource();
|
||||
poolDataSource.setDefaultMaxIdle(Integer.valueOf(dataSourceProperties.getProperty("maxIdleConnections", "8")));
|
||||
poolDataSource
|
||||
.setDefaultMaxTotal(Integer.valueOf(dataSourceProperties.getProperty("maxTotalConnections", "8")));
|
||||
poolDataSource.setConnectionPoolDataSource(driverAdapterCPDS);
|
||||
poolDataSource.setDefaultMaxWaitMillis(Integer.valueOf(dataSourceProperties.getProperty("maxWaitMs")));
|
||||
poolDataSource.setDefaultTransactionIsolation(
|
||||
Integer.valueOf(dataSourceProperties.getProperty("defaultTransactionIsolation")));
|
||||
poolDataSource.setDefaultReadOnly(Boolean.valueOf(dataSourceProperties.getProperty("defaultReadOnly")));
|
||||
poolDataSource.setDefaultTestOnCreate(Boolean.valueOf(dataSourceProperties.getProperty("testOnCreate")));
|
||||
poolDataSource.setDefaultTestOnBorrow(Boolean.valueOf(dataSourceProperties.getProperty("testOnBorrow")));
|
||||
poolDataSource.setDefaultTestOnReturn(Boolean.valueOf(dataSourceProperties.getProperty("testOnReturn")));
|
||||
poolDataSource.setDefaultTestWhileIdle(Boolean.valueOf(dataSourceProperties.getProperty("testWhileIdle")));
|
||||
poolDataSource.setValidationQuery("SELECT 0");
|
||||
|
||||
// //wrap original datasource so that TimedInterface.newProxy picks up
|
||||
// correct classloader
|
||||
String datasourceId = getDataSourceName();
|
||||
DataSourceInSpringClassloader wrappedObj = new DataSourceInSpringClassloader(datasourceId, poolDataSource);
|
||||
|
||||
Monitors.registerObject(datasourceId, this);
|
||||
|
||||
BasicGauge<Integer> numberOfActiveJDBCConnections = new BasicGauge<>(
|
||||
MonitorConfig.builder(getDataSourceName() + "-numberOfActiveJDBCConnections").withTags(tags).build(),
|
||||
new Callable<Integer>() {
|
||||
@Override
|
||||
public Integer call() throws Exception {
|
||||
return poolDataSource.getNumActive();
|
||||
}
|
||||
});
|
||||
|
||||
DefaultMonitorRegistry.getInstance().register(numberOfActiveJDBCConnections);
|
||||
|
||||
BasicGauge<Integer> numberOfIdleJDBCConnections = new BasicGauge<>(
|
||||
MonitorConfig.builder(getDataSourceName() + "-numberOfIdleJDBCConnections").withTags(tags).build(),
|
||||
new Callable<Integer>() {
|
||||
@Override
|
||||
public Integer call() throws Exception {
|
||||
return poolDataSource.getNumIdle();
|
||||
}
|
||||
});
|
||||
|
||||
DefaultMonitorRegistry.getInstance().register(numberOfIdleJDBCConnections);
|
||||
|
||||
return wrappedObj;
|
||||
// //wrap and register this object to produce JMX metrics
|
||||
// DataSource ret =
|
||||
// TimedInterface.newProxy(DataSourceInSpringClassloaderInterface.class,
|
||||
// wrappedObj, "JDBCDatasourcePool");
|
||||
// DefaultMonitorRegistry.getInstance().register((CompositeMonitor)ret);
|
||||
//
|
||||
// return ret;
|
||||
}
|
||||
|
||||
public abstract String getDataSourceName();
|
||||
|
||||
/**
|
||||
* Get the Key Column Converter base on setting
|
||||
*
|
||||
* @return the resulting converter
|
||||
*/
|
||||
public BaseKeyColumnConverter getKeyColumnConverter() {
|
||||
Properties dataSourceProperties = getDataSourceProperties();
|
||||
String name = dataSourceProperties.getProperty("keyColConversionClass");
|
||||
try {
|
||||
if (null != name) {
|
||||
Class<?> clazz = Class.forName(name);
|
||||
Constructor<?> constructor = clazz.getConstructor();
|
||||
return (BaseKeyColumnConverter) constructor.newInstance();
|
||||
} else {
|
||||
return new KeyColumnConverter();
|
||||
}
|
||||
} catch (ClassNotFoundException | InstantiationException | IllegalAccessException | IllegalArgumentException
|
||||
| InvocationTargetException | NoSuchMethodException | SecurityException e) {
|
||||
throw new ConfigurationException("Failed to set up keyColConversionClass for datasource", e);
|
||||
}
|
||||
}
|
||||
|
||||
public Properties getDataSourceProperties() {
|
||||
|
||||
Properties p = new Properties();
|
||||
|
||||
p.setProperty("url", environment.getProperty(getDataSourceName() + ".url",
|
||||
"jdbc:postgresql://postgres-test-instance.crwckwjetrxv.us-east-1.rds.amazonaws.com:5432/testdb"));
|
||||
p.setProperty("username", environment.getProperty(getDataSourceName() + ".username", "testdb"));
|
||||
|
||||
p.setProperty("driverClass",
|
||||
environment.getProperty(getDataSourceName() + ".driverClass", "org.postgresql.Driver"));
|
||||
|
||||
p.setProperty("maxTotalConnections",
|
||||
environment.getProperty(getDataSourceName() + ".maxTotalConnections", "8"));
|
||||
p.setProperty("maxIdleConnections", environment.getProperty(getDataSourceName() + ".maxIdleConnections", "8"));
|
||||
|
||||
p.setProperty("maxPreparedStatements",
|
||||
environment.getProperty(getDataSourceName() + ".maxPreparedStatements", "200"));
|
||||
p.setProperty("maxIdlePreparedStatements",
|
||||
environment.getProperty(getDataSourceName() + ".maxIdlePreparedStatements", "200"));
|
||||
p.setProperty("poolPreparedStatements",
|
||||
environment.getProperty(getDataSourceName() + ".poolPreparedStatements", "true"));
|
||||
|
||||
p.setProperty("maxWaitMs", environment.getProperty(getDataSourceName() + ".maxWaitMs", "1000"));
|
||||
p.setProperty("defaultTransactionIsolation",
|
||||
environment.getProperty(getDataSourceName() + ".defaultTransactionIsolation",
|
||||
String.valueOf(Connection.TRANSACTION_READ_COMMITTED)));
|
||||
p.setProperty("defaultReadOnly", environment.getProperty(getDataSourceName() + ".defaultReadOnly", "false"));
|
||||
p.setProperty("testOnCreate", environment.getProperty(getDataSourceName() + ".testOnCreate", "true"));
|
||||
p.setProperty("testOnBorrow", environment.getProperty(getDataSourceName() + ".testOnBorrow", "true"));
|
||||
p.setProperty("testOnReturn", environment.getProperty(getDataSourceName() + ".testOnReturn", "true"));
|
||||
p.setProperty("testWhileIdle", environment.getProperty(getDataSourceName() + ".testWhileIdle", "true"));
|
||||
p.setProperty("keyColConversionClass", environment.getProperty(getDataSourceName() + ".keyColConversionClass",
|
||||
"com.telecominfraproject.wlan.core.server.jdbc.KeyColumnLowerCaseConverter"));
|
||||
String password = environment.getProperty(getDataSourceName() + ".password", "testdb");
|
||||
p.setProperty("passwordHash", DigestUtils.sha256Hex(password));
|
||||
LOG.info("Loaded properties for {} datasource from {}: {}", getDataSourceName(),
|
||||
environment.getProperty(getDataSourceName() + ".props"), p);
|
||||
// not logging password
|
||||
p.setProperty("password", password);
|
||||
return p;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,28 @@
/**
 *
 */
package com.telecominfraproject.wlan.core.server.jdbc;

import javax.sql.DataSource;

import org.springframework.jdbc.datasource.DelegatingDataSource;

/**
 * A delegating {@link DataSource} with other extensions
 *
 * @author yongli
 *
 */
public class BaseJDbcDataSource extends DelegatingDataSource {

    private final BaseKeyColumnConverter keyConvertor;

    public BaseJDbcDataSource(DataSource targetDataSource, BaseKeyColumnConverter targetConverter) {
        super(targetDataSource);
        this.keyConvertor = targetConverter;
    }

    public BaseKeyColumnConverter getKeyColumnConverter() {
        return keyConvertor;
    }
}
@@ -0,0 +1,118 @@
package com.telecominfraproject.wlan.core.server.jdbc;

import java.util.concurrent.TimeUnit;

import javax.sql.DataSource;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.jdbc.core.JdbcOperations;
import org.springframework.jdbc.core.JdbcTemplate;

import com.telecominfraproject.wlan.server.exceptions.GenericErrorException;

/**
 * @author dtop
 *
 */
public abstract class BaseJdbcDao {
    /**
     * Wait for up to 5 seconds to catch up with the current last mod
     */
    private static final long NEXT_LASTMOD_WAIT_THRESHOLD = 5;

    private static final Logger LOG = LoggerFactory.getLogger(BaseJdbcDao.class);

    protected JdbcOperations jdbcTemplate;

    /**
     * Key converter used for converting generated key column name
     */
    protected BaseKeyColumnConverter keyColumnConverter;

    private boolean skipCheckForConcurrentUpdates;

    @Autowired(required=false)
    public void setDataSource(DataSource dataSource) {
        if(this.jdbcTemplate==null){
            LOG.debug("{} uses datasource {}", this.getClass().getSimpleName(), dataSource);
            JdbcTemplate jt = new JdbcTemplate(dataSource, false);

            //wrap and register jdbcTemplate object to produce JMX metrics
            // JdbcOperations ret = TimedInterface.newProxy(JdbcOperations.class, jt, "JdbcTemplate-"+this.getClass().getSimpleName());
            // DefaultMonitorRegistry.getInstance().register((CompositeMonitor)ret);

            //build user-friendly metrics Id - remove $$EnhancedByCGlib... at the end of the class name
            String metricsId = this.getClass().getSimpleName();
            int idx = metricsId.indexOf('$');
            if(idx>0){
                metricsId = metricsId.substring(0, idx);
            }

            JdbcOperations ret = new JdbcOperationsWithMetrics(jt, metricsId);

            this.jdbcTemplate = ret;
        }

        if (this.keyColumnConverter == null) {
            if (dataSource instanceof BaseJDbcDataSource) {
                this.keyColumnConverter = ( (BaseJDbcDataSource) dataSource).getKeyColumnConverter();
            }

            if (null == this.keyColumnConverter) {
                // use default one
                this.keyColumnConverter = new KeyColumnConverter();
            }
        }
    }

    @Value("${skipCheckForConcurrentUpdates:false}")
    private void setSkipCheckForConcurrentUpdates(String skipCheckForConcurrentUpdatesStr) {
        this.skipCheckForConcurrentUpdates = Boolean.parseBoolean(skipCheckForConcurrentUpdatesStr);
    }

    /**
     * Use this method only for testing.
     * Normally the value for this property is set via application.properties or via -DskipCheckForConcurrentUpdates=true
     * Default value is false, which means to USE checks for concurrent updates.
     * @param skipCheckForConcurrentUpdates
     */
    public void setSkipCheckForConcurrentUpdates(boolean skipCheckForConcurrentUpdates) {
        this.skipCheckForConcurrentUpdates = skipCheckForConcurrentUpdates;
    }

    public boolean isSkipCheckForConcurrentUpdates() {
        return skipCheckForConcurrentUpdates;
    }

    /**
     * Create the last modified timestamp based on the current one
     *
     * @param currentLastModTs
     * @return new last modified TS
     */
    protected static long getNewLastModTs(long currentLastModTs) {
        long result = System.currentTimeMillis();
        while (result <= currentLastModTs) {
            long diff = currentLastModTs - result;
            if (diff > TimeUnit.SECONDS.toMillis(NEXT_LASTMOD_WAIT_THRESHOLD)) {
                throw new GenericErrorException("Existing last modified TS is in the future");
            }
            if (diff > 0) {
                // pause till we have a time greater than the current lastMod
                try {
                    Thread.sleep(diff + 1);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new GenericErrorException("Unable to generate the new last modified TS", e);
                }
            }
            result = System.currentTimeMillis();
        }
        return result;
    }
}
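To illustrate how a concrete DAO is expected to combine the injected jdbcTemplate with getNewLastModTs() for optimistic concurrency checks, here is a minimal sketch; the equipment table, its columns, and the EquipmentDAO class are hypothetical and not part of this commit.

package com.telecominfraproject.wlan.core.server.jdbc.example;

import org.springframework.stereotype.Repository;

import com.telecominfraproject.wlan.core.server.jdbc.BaseJdbcDao;
import com.telecominfraproject.wlan.server.exceptions.GenericErrorException;

@Repository
public class EquipmentDAO extends BaseJdbcDao {

    public void updateName(long id, String newName, long currentLastModTs) {
        // Monotonically increasing timestamp derived from the record's current one
        long newLastModTs = getNewLastModTs(currentLastModTs);

        // Optimistic concurrency: the row is only updated if the caller still
        // holds the latest lastModifiedTimestamp
        int rowsUpdated = this.jdbcTemplate.update(
                "update equipment set name = ?, lastModifiedTimestamp = ? where id = ? and lastModifiedTimestamp = ?",
                newName, newLastModTs, id, currentLastModTs);

        if (rowsUpdated == 0) {
            throw new GenericErrorException("Concurrent update detected or equipment record not found");
        }
    }
}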
@@ -0,0 +1,23 @@
/**
 *
 */
package com.telecominfraproject.wlan.core.server.jdbc;

/**
 * When a column name is used for collecting an auto-generated key, different
 * data sources behave differently. For example, embedded HSQL expects it in
 * upper case, while PostgreSQL expects it in lower case.
 *
 * @author yongli
 *
 */
public interface BaseKeyColumnConverter {

    /**
     * Convert the list of key column names
     *
     * @param columnNames
     * @return converted names
     */
    public String[] getKeyColumnName(final String[] columnNames);
}
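The converter matters when a DAO asks the driver to return generated keys by column name, because the name it passes must match the case the database reports back. A minimal sketch of that call path is shown below; the customer table and the helper class are hypothetical, while GeneratedKeyHolder and the PreparedStatementCreator-based update are standard Spring JDBC.

import java.sql.PreparedStatement;

import org.springframework.jdbc.core.JdbcOperations;
import org.springframework.jdbc.support.GeneratedKeyHolder;
import org.springframework.jdbc.support.KeyHolder;

import com.telecominfraproject.wlan.core.server.jdbc.BaseKeyColumnConverter;

public class GeneratedKeyExample {

    // Insert a row and return the auto-generated id, letting the converter pick
    // the column-name case expected by the underlying database (HSQL vs PostgreSQL).
    public long insertCustomer(JdbcOperations jdbcTemplate, BaseKeyColumnConverter keyColumnConverter, String name) {
        final String[] keyColumns = keyColumnConverter.getKeyColumnName(new String[] { "id" });
        KeyHolder keyHolder = new GeneratedKeyHolder();

        jdbcTemplate.update(con -> {
            PreparedStatement ps = con.prepareStatement("insert into customer (name) values (?)", keyColumns);
            ps.setString(1, name);
            return ps;
        }, keyHolder);

        return keyHolder.getKey().longValue();
    }
}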
@@ -0,0 +1,13 @@
package com.telecominfraproject.wlan.core.server.jdbc;

/**
 * Class to define 'magic' numbers that are used in all DAO implementations.
 *
 * @author mpreston
 */
public class DataSourceConstants {

    public static final int STRING_SIZE_128 = 128;
    public static final int STRING_SIZE_512 = 512;
    public static final int STRING_SIZE_1024 = 1024;
}
@@ -0,0 +1,28 @@
/**
 *
 */
package com.telecominfraproject.wlan.core.server.jdbc;

/**
 * Delegate KeyColumnConverter serves as a wrapper
 *
 * @author yongli
 *
 */
public class DelegateKeyColumnConverter implements BaseKeyColumnConverter {

    private final BaseKeyColumnConverter delegate;

    protected DelegateKeyColumnConverter(BaseKeyColumnConverter delegate) {
        this.delegate = delegate;
    }

    /* (non-Javadoc)
     * @see com.telecominfraproject.wlan.core.server.jdbc.BaseKeyColumnConvertor#getKeyColumnName(java.lang.String[])
     */
    @Override
    public String[] getKeyColumnName(String[] columnNames) {
        return delegate.getKeyColumnName(columnNames);
    }

}
File diff suppressed because it is too large
@@ -0,0 +1,29 @@
/**
 *
 */
package com.telecominfraproject.wlan.core.server.jdbc;

/**
 * Default converter, does nothing
 *
 * @author yongli
 *
 */
public class KeyColumnConverter implements BaseKeyColumnConverter {
    /**
     * Return list of column names unchanged
     *
     * @param columnNames
     * @return converted name
     */
    @Override
    public String[] getKeyColumnName(final String[] columnNames) {
        return columnNames;
    }
}
@@ -0,0 +1,31 @@
/**
 *
 */
package com.telecominfraproject.wlan.core.server.jdbc;

/**
 * Convert all column names to lower case.
 *
 * @author yongli
 *
 */
public class KeyColumnLowerCaseConverter implements BaseKeyColumnConverter {
    /**
     * Return list of column names in lower case.
     *
     * @param columnNames
     * @return converted name
     */
    @Override
    public String[] getKeyColumnName(final String[] columnNames) {
        // empty
        if (null == columnNames || (0 == columnNames.length)) {
            return null;
        }
        String[] result = new String[columnNames.length];
        for (int i = 0; i < columnNames.length; ++i) {
            result[i] = columnNames[i].toLowerCase();
        }
        return result;
    }
}
@@ -0,0 +1,31 @@
/**
 *
 */
package com.telecominfraproject.wlan.core.server.jdbc;

/**
 * Convert all column names to upper case.
 *
 * @author yongli
 *
 */
public class KeyColumnUpperCaseConverter implements BaseKeyColumnConverter {
    /**
     * Return list of column names in upper case
     *
     * @param columnNames
     * @return converted name
     */
    @Override
    public String[] getKeyColumnName(final String[] columnNames) {
        // empty
        if (null == columnNames || (0 == columnNames.length)) {
            return null;
        }
        String[] result = new String[columnNames.length];
        for (int i = 0; i < columnNames.length; ++i) {
            result[i] = columnNames[i].toUpperCase();
        }
        return result;
    }
}
@@ -0,0 +1,33 @@
package com.telecominfraproject.wlan.core.server.jdbc;

import javax.sql.DataSource;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Primary;
import org.springframework.context.annotation.Profile;
import org.springframework.context.annotation.PropertySource;
import org.springframework.stereotype.Component;

import com.telecominfraproject.wlan.core.server.jdbc.BaseDataSourceConfig;

/**
 * @author dtoptygin
 *
 */
@Component
@Profile("use_single_ds")
@PropertySource({ "${singleDataSource.props:classpath:singleDataSource.properties}" })
public class SingleDataSourceConfig extends BaseDataSourceConfig {

    @Profile("!use_embedded_db")
    @Bean
    @Primary
    public DataSource dataSource(){
        return new BaseJDbcDataSource(super.getDataSource(), super.getKeyColumnConverter());
    }

    @Override
    public String getDataSourceName() {
        return "singleDataSource";
    }
}
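SingleDataSourceConfig only contributes its pooled DataSource when the use_single_ds profile is active and use_embedded_db is not. A hedged sketch of activating that profile at startup follows; ServerApplication is a placeholder for whatever @SpringBootApplication class a service actually uses, and in practice the profile is more commonly set via -Dspring.profiles.active=use_single_ds.

import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;

@SpringBootApplication
public class ServerApplication {

    public static void main(String[] args) {
        // Activate the single-datasource profile so SingleDataSourceConfig is picked up;
        // singleDataSource.* properties come from singleDataSource.properties unless
        // overridden with -DsingleDataSource.props=file:/path/to/override.properties
        new SpringApplicationBuilder(ServerApplication.class)
                .profiles("use_single_ds")
                .run(args);
    }
}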
@@ -0,0 +1,153 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.commons.dbcp2.cpdsadapter;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.commons.dbcp2.PStmtKey;
|
||||
|
||||
/**
|
||||
* A key uniquely identifying a {@link java.sql.PreparedStatement PreparedStatement}.
|
||||
* @version $Id: PStmtKeyCPDS.java 1649430 2015-01-04 21:29:32Z tn $
|
||||
* @since 2.0
|
||||
*/
|
||||
public class PStmtKeyCPDS extends PStmtKey {
|
||||
private final Integer _resultSetHoldability;
|
||||
private final int _columnIndexes[];
|
||||
private final String _columnNames[];
|
||||
|
||||
public PStmtKeyCPDS(String sql) {
|
||||
super(sql);
|
||||
_resultSetHoldability = null;
|
||||
_columnIndexes = null;
|
||||
_columnNames = null;
|
||||
}
|
||||
|
||||
public PStmtKeyCPDS(String sql, int autoGeneratedKeys) {
|
||||
super(sql, null, autoGeneratedKeys);
|
||||
_resultSetHoldability = null;
|
||||
_columnIndexes = null;
|
||||
_columnNames = null;
|
||||
}
|
||||
|
||||
public PStmtKeyCPDS(String sql, int resultSetType, int resultSetConcurrency) {
|
||||
super(sql, resultSetType, resultSetConcurrency);
|
||||
_resultSetHoldability = null;
|
||||
_columnIndexes = null;
|
||||
_columnNames = null;
|
||||
}
|
||||
|
||||
public PStmtKeyCPDS(String sql, int resultSetType, int resultSetConcurrency,
|
||||
int resultSetHoldability) {
|
||||
super(sql, resultSetType, resultSetConcurrency);
|
||||
_resultSetHoldability = Integer.valueOf(resultSetHoldability);
|
||||
_columnIndexes = null;
|
||||
_columnNames = null;
|
||||
}
|
||||
|
||||
public PStmtKeyCPDS(String sql, int columnIndexes[]) {
|
||||
super(sql);
|
||||
_columnIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length);
|
||||
_resultSetHoldability = null;
|
||||
_columnNames = null;
|
||||
}
|
||||
|
||||
public PStmtKeyCPDS(String sql, String columnNames[]) {
|
||||
super(sql);
|
||||
_columnNames = Arrays.copyOf(columnNames, columnNames.length);
|
||||
_resultSetHoldability = null;
|
||||
_columnIndexes = null;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (this == obj) {
|
||||
return true;
|
||||
}
|
||||
if (!super.equals(obj)) {
|
||||
return false;
|
||||
}
|
||||
if (getClass() != obj.getClass()) {
|
||||
return false;
|
||||
}
|
||||
PStmtKeyCPDS other = (PStmtKeyCPDS) obj;
|
||||
if (!Arrays.equals(getColumnIndexes(), other.getColumnIndexes())) {
|
||||
return false;
|
||||
}
|
||||
if (!Arrays.equals(getColumnNames(), other.getColumnNames())) {
|
||||
return false;
|
||||
}
|
||||
if (getResultSetHoldability() == null) {
|
||||
if (other.getResultSetHoldability() != null) {
|
||||
return false;
|
||||
}
|
||||
} else if (!getResultSetHoldability().equals(other.getResultSetHoldability())) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
final int prime = 31;
|
||||
int result = super.hashCode();
|
||||
result = prime * result + Arrays.hashCode(getColumnIndexes());
|
||||
result = prime * result + Arrays.hashCode(getColumnNames());
|
||||
result = prime * result + (getResultSetHoldability() == null ? 0 : getResultSetHoldability().hashCode());
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("PStmtKey: sql=");
|
||||
buf.append(getSql());
|
||||
buf.append(", catalog=");
|
||||
buf.append(getCatalog());
|
||||
buf.append(", resultSetType=");
|
||||
buf.append(getResultSetType());
|
||||
buf.append(", resultSetConcurrency=");
|
||||
buf.append(getResultSetConcurrency());
|
||||
buf.append(", statmentType=");
|
||||
buf.append(getStmtType());
|
||||
buf.append(", resultSetHoldability=");
|
||||
buf.append(getResultSetHoldability());
|
||||
buf.append(", columnIndexes=");
|
||||
buf.append(Arrays.toString(getColumnIndexes()));
|
||||
buf.append(", columnNames=");
|
||||
buf.append(Arrays.toString(getColumnNames()));
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
//** Art2wave BEGIN
|
||||
public int[] getColumnIndexes() {
|
||||
return _columnIndexes;
|
||||
}
|
||||
|
||||
public String[] getColumnNames() {
|
||||
return _columnNames;
|
||||
}
|
||||
|
||||
public Integer getResultSetHoldability() {
|
||||
return _resultSetHoldability;
|
||||
}
|
||||
//** Art2wave END
|
||||
}
|
||||
|
||||
@@ -0,0 +1,532 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.commons.dbcp2.cpdsadapter;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.PreparedStatement;
|
||||
import java.sql.SQLException;
|
||||
import java.util.Vector;
|
||||
|
||||
import javax.sql.ConnectionEvent;
|
||||
import javax.sql.ConnectionEventListener;
|
||||
import javax.sql.PooledConnection;
|
||||
import javax.sql.StatementEventListener;
|
||||
|
||||
import org.apache.commons.dbcp2.DelegatingConnection;
|
||||
import org.apache.commons.dbcp2.PoolablePreparedStatement;
|
||||
import org.apache.commons.pool2.KeyedObjectPool;
|
||||
import org.apache.commons.pool2.KeyedPooledObjectFactory;
|
||||
import org.apache.commons.pool2.PooledObject;
|
||||
import org.apache.commons.pool2.impl.DefaultPooledObject;
|
||||
|
||||
/**
|
||||
* Implementation of PooledConnection that is returned by
|
||||
* PooledConnectionDataSource.
|
||||
*
|
||||
* @author John D. McNally
|
||||
* @version $Revision: 1572242 $ $Date: 2014-02-26 12:34:39 -0800 (Wed, 26 Feb 2014) $
|
||||
* @since 2.0
|
||||
*/
|
||||
class PooledConnectionImpl implements PooledConnection,
|
||||
KeyedPooledObjectFactory<PStmtKeyCPDS,PoolablePreparedStatement<PStmtKeyCPDS>> {
|
||||
|
||||
private static final String CLOSED
|
||||
= "Attempted to use PooledConnection after closed() was called.";
|
||||
|
||||
/**
|
||||
* The JDBC database connection that represents the physical db connection.
|
||||
*/
|
||||
private Connection connection = null;
|
||||
|
||||
/**
|
||||
* A DelegatingConnection used to create a PoolablePreparedStatementStub
|
||||
*/
|
||||
private final DelegatingConnection<?> delegatingConnection;
|
||||
|
||||
/**
|
||||
* The JDBC database logical connection.
|
||||
*/
|
||||
private Connection logicalConnection = null;
|
||||
|
||||
/**
|
||||
* ConnectionEventListeners
|
||||
*/
|
||||
private final Vector<ConnectionEventListener> eventListeners;
|
||||
|
||||
/**
|
||||
* StatementEventListeners
|
||||
*/
|
||||
private final Vector<StatementEventListener> statementEventListeners =
|
||||
new Vector<>();
|
||||
|
||||
/**
|
||||
* flag set to true, once close() is called.
|
||||
*/
|
||||
private boolean isClosed;
|
||||
|
||||
/** My pool of {*link PreparedStatement}s. */
|
||||
private KeyedObjectPool<PStmtKeyCPDS, PoolablePreparedStatement<PStmtKeyCPDS>> pstmtPool = null;
|
||||
|
||||
/**
|
||||
* Controls access to the underlying connection
|
||||
*/
|
||||
private boolean accessToUnderlyingConnectionAllowed = false;
|
||||
|
||||
/**
|
||||
* Wrap the real connection.
|
||||
* @param connection the connection to be wrapped
|
||||
*/
|
||||
PooledConnectionImpl(Connection connection) {
|
||||
this.connection = connection;
|
||||
if (connection instanceof DelegatingConnection) {
|
||||
this.delegatingConnection = (DelegatingConnection<?>) connection;
|
||||
} else {
|
||||
this.delegatingConnection = new DelegatingConnection<>(connection);
|
||||
}
|
||||
eventListeners = new Vector<>();
|
||||
isClosed = false;
|
||||
}
|
||||
|
||||
public void setStatementPool(
|
||||
KeyedObjectPool<PStmtKeyCPDS, PoolablePreparedStatement<PStmtKeyCPDS>> statementPool) {
|
||||
pstmtPool = statementPool;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public void addConnectionEventListener(ConnectionEventListener listener) {
|
||||
if (!eventListeners.contains(listener)) {
|
||||
eventListeners.add(listener);
|
||||
}
|
||||
}
|
||||
|
||||
/* JDBC_4_ANT_KEY_BEGIN */
|
||||
@Override
|
||||
public void addStatementEventListener(StatementEventListener listener) {
|
||||
if (!statementEventListeners.contains(listener)) {
|
||||
statementEventListeners.add(listener);
|
||||
}
|
||||
}
|
||||
/* JDBC_4_ANT_KEY_END */
|
||||
|
||||
/**
|
||||
* Closes the physical connection and marks this
|
||||
* <code>PooledConnection</code> so that it may not be used
|
||||
* to generate any more logical <code>Connection</code>s.
|
||||
*
|
||||
* @exception SQLException if an error occurs or the connection is already closed
|
||||
*/
|
||||
@Override
|
||||
public void close() throws SQLException {
|
||||
assertOpen();
|
||||
isClosed = true;
|
||||
try {
|
||||
if (pstmtPool != null) {
|
||||
try {
|
||||
pstmtPool.close();
|
||||
} finally {
|
||||
pstmtPool = null;
|
||||
}
|
||||
}
|
||||
} catch (RuntimeException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
throw new SQLException("Cannot close connection (return to pool failed)", e);
|
||||
} finally {
|
||||
try {
|
||||
connection.close();
|
||||
} finally {
|
||||
connection = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Throws an SQLException, if isClosed is true
|
||||
*/
|
||||
private void assertOpen() throws SQLException {
|
||||
if (isClosed) {
|
||||
throw new SQLException(CLOSED);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a JDBC connection.
|
||||
*
|
||||
* @return The database connection.
|
||||
* @throws SQLException if the connection is not open or the previous logical connection is still open
|
||||
*/
|
||||
@Override
|
||||
public Connection getConnection() throws SQLException {
|
||||
assertOpen();
|
||||
// make sure the last connection is marked as closed
|
||||
if (logicalConnection != null && !logicalConnection.isClosed()) {
|
||||
// should notify pool of error so the pooled connection can
|
||||
// be removed !FIXME!
|
||||
throw new SQLException("PooledConnection was reused, without"
|
||||
+ "its previous Connection being closed.");
|
||||
}
|
||||
|
||||
// the spec requires that this return a new Connection instance.
|
||||
logicalConnection = new ConnectionImpl(
|
||||
this, connection, isAccessToUnderlyingConnectionAllowed());
|
||||
return logicalConnection;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public void removeConnectionEventListener(
|
||||
ConnectionEventListener listener) {
|
||||
eventListeners.remove(listener);
|
||||
}
|
||||
|
||||
/* JDBC_4_ANT_KEY_BEGIN */
|
||||
@Override
|
||||
public void removeStatementEventListener(StatementEventListener listener) {
|
||||
statementEventListeners.remove(listener);
|
||||
}
|
||||
/* JDBC_4_ANT_KEY_END */
|
||||
|
||||
/**
|
||||
* Closes the physical connection and checks that the logical connection
|
||||
* was closed as well.
|
||||
*/
|
||||
@Override
|
||||
protected void finalize() throws Throwable {
|
||||
// Closing the Connection ensures that if anyone tries to use it,
|
||||
// an error will occur.
|
||||
try {
|
||||
connection.close();
|
||||
} catch (Exception ignored) {
|
||||
}
|
||||
|
||||
// make sure the last connection is marked as closed
|
||||
if (logicalConnection != null && !logicalConnection.isClosed()) {
|
||||
throw new SQLException("PooledConnection was gc'ed, without"
|
||||
+ "its last Connection being closed.");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* sends a connectionClosed event.
|
||||
*/
|
||||
void notifyListeners() {
|
||||
ConnectionEvent event = new ConnectionEvent(this);
|
||||
Object[] listeners = eventListeners.toArray();
|
||||
for (Object listener : listeners) {
|
||||
((ConnectionEventListener) listener).connectionClosed(event);
|
||||
}
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// The following code implements a PreparedStatement pool
|
||||
|
||||
/**
|
||||
* Create or obtain a {@link PreparedStatement} from my pool.
|
||||
* @param sql the SQL statement
|
||||
* @return a {@link PoolablePreparedStatement}
|
||||
*/
|
||||
PreparedStatement prepareStatement(String sql) throws SQLException {
|
||||
if (pstmtPool == null) {
|
||||
return connection.prepareStatement(sql);
|
||||
}
|
||||
try {
|
||||
return pstmtPool.borrowObject(createKey(sql));
|
||||
} catch (RuntimeException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
throw new SQLException("Borrow prepareStatement from pool failed", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create or obtain a {@link PreparedStatement} from my pool.
|
||||
* @param sql a <code>String</code> object that is the SQL statement to
|
||||
* be sent to the database; may contain one or more '?' IN
|
||||
* parameters
|
||||
* @param resultSetType a result set type; one of
|
||||
* <code>ResultSet.TYPE_FORWARD_ONLY</code>,
|
||||
* <code>ResultSet.TYPE_SCROLL_INSENSITIVE</code>, or
|
||||
* <code>ResultSet.TYPE_SCROLL_SENSITIVE</code>
|
||||
* @param resultSetConcurrency a concurrency type; one of
|
||||
* <code>ResultSet.CONCUR_READ_ONLY</code> or
|
||||
* <code>ResultSet.CONCUR_UPDATABLE</code>
|
||||
*
|
||||
* @return a {@link PoolablePreparedStatement}
|
||||
* @see Connection#prepareStatement(String, int, int)
|
||||
*/
|
||||
PreparedStatement prepareStatement(String sql, int resultSetType,
|
||||
int resultSetConcurrency)
|
||||
throws SQLException {
|
||||
if (pstmtPool == null) {
|
||||
return connection.prepareStatement(sql, resultSetType, resultSetConcurrency);
|
||||
}
|
||||
try {
|
||||
return pstmtPool.borrowObject(
|
||||
createKey(sql,resultSetType,resultSetConcurrency));
|
||||
} catch (RuntimeException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
throw new SQLException("Borrow prepareStatement from pool failed", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create or obtain a {@link PreparedStatement} from my pool.
|
||||
* @param sql an SQL statement that may contain one or more '?' IN
|
||||
* parameter placeholders
|
||||
* @param autoGeneratedKeys a flag indicating whether auto-generated keys
|
||||
* should be returned; one of
|
||||
* <code>Statement.RETURN_GENERATED_KEYS</code> or
|
||||
* <code>Statement.NO_GENERATED_KEYS</code>
|
||||
* @return a {@link PoolablePreparedStatement}
|
||||
* @see Connection#prepareStatement(String, int)
|
||||
*/
|
||||
PreparedStatement prepareStatement(String sql, int autoGeneratedKeys)
|
||||
throws SQLException {
|
||||
if (pstmtPool == null) {
|
||||
return connection.prepareStatement(sql, autoGeneratedKeys);
|
||||
}
|
||||
try {
|
||||
return pstmtPool.borrowObject(createKey(sql,autoGeneratedKeys));
|
||||
} catch (RuntimeException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
throw new SQLException("Borrow prepareStatement from pool failed", e);
|
||||
}
|
||||
}
|
||||
|
||||
PreparedStatement prepareStatement(String sql, int resultSetType,
|
||||
int resultSetConcurrency, int resultSetHoldability)
|
||||
throws SQLException {
|
||||
if (pstmtPool == null) {
|
||||
return connection.prepareStatement(sql, resultSetType,
|
||||
resultSetConcurrency, resultSetHoldability);
|
||||
}
|
||||
try {
|
||||
return pstmtPool.borrowObject(createKey(sql, resultSetType,
|
||||
resultSetConcurrency, resultSetHoldability));
|
||||
} catch (RuntimeException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
throw new SQLException("Borrow prepareStatement from pool failed", e);
|
||||
}
|
||||
}
|
||||
|
||||
PreparedStatement prepareStatement(String sql, int columnIndexes[])
|
||||
throws SQLException {
|
||||
if (pstmtPool == null) {
|
||||
return connection.prepareStatement(sql, columnIndexes);
|
||||
}
|
||||
try {
|
||||
return pstmtPool.borrowObject(createKey(sql, columnIndexes));
|
||||
} catch (RuntimeException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
throw new SQLException("Borrow prepareStatement from pool failed", e);
|
||||
}
|
||||
}
|
||||
|
||||
PreparedStatement prepareStatement(String sql, String columnNames[])
|
||||
throws SQLException {
|
||||
if (pstmtPool == null) {
|
||||
return connection.prepareStatement(sql, columnNames);
|
||||
}
|
||||
try {
|
||||
return pstmtPool.borrowObject(createKey(sql, columnNames));
|
||||
} catch (RuntimeException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
throw new SQLException("Borrow prepareStatement from pool failed", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a {*link PooledConnectionImpl.PStmtKey} for the given arguments.
|
||||
*/
|
||||
protected PStmtKeyCPDS createKey(String sql, int autoGeneratedKeys) {
|
||||
return new PStmtKeyCPDS(normalizeSQL(sql), autoGeneratedKeys);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a {*link PooledConnectionImpl.PStmtKey} for the given arguments.
|
||||
*/
|
||||
protected PStmtKeyCPDS createKey(String sql, int resultSetType,
|
||||
int resultSetConcurrency, int resultSetHoldability) {
|
||||
return new PStmtKeyCPDS(normalizeSQL(sql), resultSetType,
|
||||
resultSetConcurrency, resultSetHoldability);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a {*link PooledConnectionImpl.PStmtKey} for the given arguments.
|
||||
*/
|
||||
protected PStmtKeyCPDS createKey(String sql, int columnIndexes[]) {
|
||||
return new PStmtKeyCPDS(normalizeSQL(sql), columnIndexes);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a {*link PooledConnectionImpl.PStmtKey} for the given arguments.
|
||||
*/
|
||||
protected PStmtKeyCPDS createKey(String sql, String columnNames[]) {
|
||||
return new PStmtKeyCPDS(normalizeSQL(sql), columnNames);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a {*link PooledConnectionImpl.PStmtKey} for the given arguments.
|
||||
*/
|
||||
protected PStmtKeyCPDS createKey(String sql, int resultSetType,
|
||||
int resultSetConcurrency) {
|
||||
return new PStmtKeyCPDS(normalizeSQL(sql), resultSetType,
|
||||
resultSetConcurrency);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a {*link PooledConnectionImpl.PStmtKey} for the given arguments.
|
||||
*/
|
||||
protected PStmtKeyCPDS createKey(String sql) {
|
||||
return new PStmtKeyCPDS(normalizeSQL(sql));
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize the given SQL statement, producing a
|
||||
* cannonical form that is semantically equivalent to the original.
|
||||
*/
|
||||
protected String normalizeSQL(String sql) {
|
||||
return sql.trim();
|
||||
}
|
||||
|
||||
/**
|
||||
* My {*link KeyedPoolableObjectFactory} method for creating
|
||||
* {*link PreparedStatement}s.
|
||||
* @param key the key for the {*link PreparedStatement} to be created
|
||||
*/
|
||||
@Override
|
||||
public PooledObject<PoolablePreparedStatement<PStmtKeyCPDS>> makeObject(PStmtKeyCPDS key) throws Exception {
|
||||
if (null == key) {
|
||||
throw new IllegalArgumentException();
|
||||
}
|
||||
// _openPstmts++;
|
||||
if (null == key.getResultSetType()
|
||||
&& null == key.getResultSetConcurrency()) {
|
||||
//** Art2wave BEGIN
|
||||
if (null != key.getAutoGeneratedKeys()) {
|
||||
return new DefaultPooledObject<>(new PoolablePreparedStatement<>(
|
||||
connection.prepareStatement(key.getSql(),
|
||||
key.getAutoGeneratedKeys().intValue()),
|
||||
key, pstmtPool, delegatingConnection));
|
||||
}
|
||||
if (null != key.getColumnNames()) {
|
||||
return new DefaultPooledObject<>(new PoolablePreparedStatement<>(
|
||||
connection.prepareStatement(key.getSql(), key.getColumnNames()),
|
||||
key, pstmtPool, delegatingConnection));
|
||||
}
|
||||
if (null != key.getColumnIndexes()) {
|
||||
return new DefaultPooledObject<>(new PoolablePreparedStatement<>(
|
||||
connection.prepareStatement(key.getSql(), key.getColumnIndexes()),
|
||||
key, pstmtPool, delegatingConnection));
|
||||
}
|
||||
return new DefaultPooledObject<>(new PoolablePreparedStatement<>(
|
||||
connection.prepareStatement(key.getSql()),
|
||||
key, pstmtPool, delegatingConnection));
|
||||
//** Art2wave END
|
||||
}
|
||||
return new DefaultPooledObject<>(new PoolablePreparedStatement<>(
|
||||
connection.prepareStatement(key.getSql(),
|
||||
key.getResultSetType().intValue(),
|
||||
key.getResultSetConcurrency().intValue()),
|
||||
key, pstmtPool, delegatingConnection));
|
||||
}
|
||||
|
||||
/**
|
||||
* My {*link KeyedPoolableObjectFactory} method for destroying
|
||||
* {*link PreparedStatement}s.
|
||||
* @param key ignored
|
||||
* @param p the wrapped {*link PreparedStatement} to be destroyed.
|
||||
*/
|
||||
@Override
|
||||
public void destroyObject(PStmtKeyCPDS key,
|
||||
PooledObject<PoolablePreparedStatement<PStmtKeyCPDS>> p)
|
||||
throws Exception {
|
||||
p.getObject().getInnermostDelegate().close();
|
||||
}
|
||||
|
||||
/**
|
||||
* My {*link KeyedPoolableObjectFactory} method for validating
|
||||
* {*link PreparedStatement}s.
|
||||
* @param key ignored
|
||||
* @param p ignored
|
||||
* @return <tt>true</tt>
|
||||
*/
|
||||
@Override
|
||||
public boolean validateObject(PStmtKeyCPDS key,
|
||||
PooledObject<PoolablePreparedStatement<PStmtKeyCPDS>> p) {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* My {*link KeyedPoolableObjectFactory} method for activating
|
||||
* {*link PreparedStatement}s.
|
||||
* @param key ignored
|
||||
* @param p ignored
|
||||
*/
|
||||
@Override
|
||||
public void activateObject(PStmtKeyCPDS key,
|
||||
PooledObject<PoolablePreparedStatement<PStmtKeyCPDS>> p)
|
||||
throws Exception {
|
||||
p.getObject().activate();
|
||||
}
|
||||
|
||||
/**
|
||||
* My {*link KeyedPoolableObjectFactory} method for passivating
|
||||
* {*link PreparedStatement}s. Currently invokes {*link PreparedStatement#clearParameters}.
|
||||
* @param key ignored
|
||||
* @param p a wrapped {*link PreparedStatement}
|
||||
*/
|
||||
@Override
|
||||
public void passivateObject(PStmtKeyCPDS key,
|
||||
PooledObject<PoolablePreparedStatement<PStmtKeyCPDS>> p)
|
||||
throws Exception {
|
||||
PoolablePreparedStatement<PStmtKeyCPDS> ppss = p.getObject();
|
||||
ppss.clearParameters();
|
||||
ppss.passivate();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the value of the accessToUnderlyingConnectionAllowed property.
|
||||
*
|
||||
* @return true if access to the underlying is allowed, false otherwise.
|
||||
*/
|
||||
public synchronized boolean isAccessToUnderlyingConnectionAllowed() {
|
||||
return this.accessToUnderlyingConnectionAllowed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the value of the accessToUnderlyingConnectionAllowed property.
|
||||
* It controls if the PoolGuard allows access to the underlying connection.
|
||||
* (Default: false)
|
||||
*
|
||||
* @param allow Access to the underlying connection is granted when true.
|
||||
*/
|
||||
public synchronized void setAccessToUnderlyingConnectionAllowed(boolean allow) {
|
||||
this.accessToUnderlyingConnectionAllowed = allow;
|
||||
}
|
||||
}
|
||||
3
base-jdbc/src/main/resources/singleDataSource.properties
Normal file
@@ -0,0 +1,3 @@
singleDataSource.url=jdbc:postgresql://postgres-test-instance:5432/testdb
singleDataSource.username=testdb
singleDataSource.password=testdb
22
base-job/pom.xml
Normal file
@@ -0,0 +1,22 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>com.telecominfraproject.wlan</groupId>
    <artifactId>tip-wlan-cloud-root-pom</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <relativePath>../../tip-wlan-cloud-root</relativePath>
  </parent>

  <artifactId>base-job</artifactId>
  <name>base-job</name>
  <description>Common configuration for scheduled and asynchronous job processing.</description>

  <dependencies>
    <dependency>
      <groupId>com.telecominfraproject.wlan</groupId>
      <artifactId>base-models</artifactId>
      <version>${tip-wlan-cloud.release.version}</version>
    </dependency>
  </dependencies>
</project>
@@ -0,0 +1,93 @@
|
||||
package com.telecominfraproject.wlan.job;
|
||||
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.concurrent.LinkedBlockingDeque;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import javax.annotation.PostConstruct;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.telecominfraproject.wlan.core.model.json.GenericResponse;
|
||||
import com.telecominfraproject.wlan.server.exceptions.GenericErrorException;
|
||||
|
||||
/**
|
||||
* @author ekeddy
|
||||
*
|
||||
*/
|
||||
@Component
|
||||
public class JobManager {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(JobManager.class);
|
||||
|
||||
/**
|
||||
* How long we wait for a job in the job queue
|
||||
*/
|
||||
protected static final long JOB_QUEUE_POLL_TIME_MS = 1000;
|
||||
|
||||
private final int queueSize = Integer.getInteger("com.whizcontrol.JobManager.queueSize", 10000);
|
||||
/**
|
||||
* job queue
|
||||
*/
|
||||
private final BlockingQueue<Runnable> jobQueue = new LinkedBlockingDeque<>(queueSize);
|
||||
|
||||
private Thread jobManagerThread;
|
||||
|
||||
@PostConstruct
|
||||
public void startupAuditor() {
|
||||
// start up the thread
|
||||
jobManagerThread = new Thread(new Runnable() {
|
||||
private boolean isRunning = true;
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
LOG.info("Job Manager Started");
|
||||
while (isRunning) {
|
||||
try {
|
||||
Runnable job = jobQueue.poll(JOB_QUEUE_POLL_TIME_MS, TimeUnit.MILLISECONDS);
|
||||
if (null != job) {
|
||||
job.run();
|
||||
}
|
||||
} catch (RuntimeException e) {
|
||||
LOG.error("Failed to run job", e);
|
||||
} catch (InterruptedException e) {
|
||||
LOG.debug("Job queue poll interrupted");
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
LOG.info("Job Manager Stopped");
|
||||
}
|
||||
}, "JobManagerThread");
|
||||
jobManagerThread.setDaemon(true);
|
||||
jobManagerThread.start();
|
||||
}
|
||||
|
||||
/**
|
||||
* Submit a job to the queue
|
||||
* @param job
|
||||
*/
|
||||
public void submitJob(Runnable job) {
|
||||
if(!jobQueue.offer(job)){
|
||||
throw new GenericErrorException("Job Manager queue is over capacity ("+queueSize+")");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Submit a named job to the queue and report the outcome
|
||||
* @param job
|
||||
*/
|
||||
public GenericResponse submitNamedJob(NamedJob job) {
|
||||
GenericResponse result = new GenericResponse();
|
||||
try {
|
||||
submitJob(job);
|
||||
result.setSuccess(true);
|
||||
result.setMessage(job.getJobName()+" scheduled");
|
||||
} catch (Exception e) {
|
||||
result.setSuccess(false);
|
||||
result.setMessage("Failed to schedule "+job.getJobName()+": " + e.getLocalizedMessage());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,13 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.job;
|
||||
|
||||
/**
|
||||
* @author ekeddy
|
||||
*
|
||||
*/
|
||||
public interface NamedJob extends Runnable {
|
||||
|
||||
String getJobName();
|
||||
}
|
||||
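A hedged usage sketch for the JobManager/NamedJob pair above: a caller builds a NamedJob and submits it, getting back a GenericResponse that reports whether the job fit into the bounded queue. The CleanupTrigger class and the job body are illustrative assumptions.

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import com.telecominfraproject.wlan.core.model.json.GenericResponse;
import com.telecominfraproject.wlan.job.JobManager;
import com.telecominfraproject.wlan.job.NamedJob;

// Sketch only: CleanupTrigger and the job body are illustrative.
@Component
public class CleanupTrigger {

    @Autowired
    private JobManager jobManager;

    public GenericResponse triggerCleanup() {
        NamedJob job = new NamedJob() {
            @Override
            public String getJobName() {
                return "cleanup";
            }

            @Override
            public void run() {
                // actual work goes here
            }
        };
        // success=false if the bounded queue rejected the job
        return jobManager.submitNamedJob(job);
    }
}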
20
base-partitioner/pom.xml
Normal file
@@ -0,0 +1,20 @@
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>com.telecominfraproject.wlan</groupId>
|
||||
<artifactId>tip-wlan-cloud-root-pom</artifactId>
|
||||
<version>0.0.1-SNAPSHOT</version>
|
||||
<relativePath>../../tip-wlan-cloud-root</relativePath>
|
||||
</parent>
|
||||
<artifactId>base-partitioner</artifactId>
|
||||
<name>base-partitioner</name>
|
||||
<description>Interface and implementations of the partitioning service to support scalable cloud deployments.</description>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>com.telecominfraproject.wlan</groupId>
|
||||
<artifactId>base-hazelcast-client</artifactId>
|
||||
<version>${tip-wlan-cloud.release.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
</project>
|
||||
@@ -0,0 +1,32 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.partitioner;
|
||||
|
||||
/**
|
||||
* Callback used when monitoring the cluster
|
||||
*
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
public interface ClusterMonitorCallback {
|
||||
/**
|
||||
* Raise an issue
|
||||
*
|
||||
* @param serviceName
|
||||
* @param issueDetails
|
||||
*
|
||||
* @return issue raised
|
||||
*/
|
||||
boolean raiseIssue(String serviceName, String issueDetails);
|
||||
|
||||
/**
|
||||
* Clear an issue
|
||||
*
|
||||
* @param serviceName
|
||||
* @param issueDetails
|
||||
*
|
||||
* @return issue cleared
|
||||
*/
|
||||
boolean clearIssue(String serviceName, String issueDetails);
|
||||
}
|
||||
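A minimal sketch of a ClusterMonitorCallback implementation that only logs; a production callback would more likely raise and clear alarms. The class name is illustrative.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.telecominfraproject.wlan.partitioner.ClusterMonitorCallback;

// Sketch only: a logging callback; a real deployment would likely raise/clear alarms instead.
public class LoggingClusterMonitorCallback implements ClusterMonitorCallback {
    private static final Logger LOG = LoggerFactory.getLogger(LoggingClusterMonitorCallback.class);

    @Override
    public boolean raiseIssue(String serviceName, String issueDetails) {
        LOG.warn("Cluster issue for {}: {}", serviceName, issueDetails);
        return true; // tell the monitor the issue was recorded
    }

    @Override
    public boolean clearIssue(String serviceName, String issueDetails) {
        LOG.info("Cluster issue cleared for {}: {}", serviceName, issueDetails);
        return true;
    }
}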
@@ -0,0 +1,60 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.partitioner;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
|
||||
/**
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
public class ClusterMonitorDetails {
|
||||
private final int currentSize;
|
||||
private final Integer normalSize;
|
||||
|
||||
/**
|
||||
* Node URLs that have gone missing since the previous monitor run
|
||||
*/
|
||||
private final Set<String> missingNodeUrls;
|
||||
|
||||
/**
|
||||
* Node URLs that are currently running
|
||||
*/
|
||||
private final Set<String> currentNodeUrls;
|
||||
|
||||
public ClusterMonitorDetails(Integer normalSize, Set<String> previousNodeUrls, Set<String> nodeUrls) {
|
||||
this.normalSize = normalSize;
|
||||
this.currentNodeUrls = new TreeSet<>(nodeUrls);
|
||||
this.currentSize = this.getCurrentNodeUrls().size();
|
||||
if (previousNodeUrls != null) {
|
||||
missingNodeUrls = new TreeSet<>();
|
||||
for (String url : previousNodeUrls) {
|
||||
if (!getCurrentNodeUrls().contains(url)) {
|
||||
getMissingNodeUrls().add(url);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
missingNodeUrls = Collections.emptySet();
|
||||
}
|
||||
}
|
||||
|
||||
public int getCurrentSize() {
|
||||
return currentSize;
|
||||
}
|
||||
|
||||
public Integer getNormalSize() {
|
||||
return normalSize;
|
||||
}
|
||||
|
||||
public Set<String> getMissingNodeUrls() {
|
||||
return missingNodeUrls;
|
||||
}
|
||||
|
||||
public Set<String> getCurrentNodeUrls() {
|
||||
return currentNodeUrls;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,129 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.partitioner;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.time.LocalDateTime;
|
||||
import java.time.ZoneOffset;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
|
||||
/**
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
public class DynamicPartitionMonitorData {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(DynamicPartitionMonitorData.class);
|
||||
private final String serviceName;
|
||||
private Instant lastResetTime;
|
||||
private Set<String> lastNodeUrl;
|
||||
private int normalClusterSize;
|
||||
private boolean issueRaised;
|
||||
|
||||
public DynamicPartitionMonitorData(String serviceName) {
|
||||
this.serviceName = serviceName;
|
||||
}
|
||||
|
||||
/**
|
||||
* We will raise issue when
|
||||
* <ul>
|
||||
* <li>Size reduced from normal
|
||||
* <li>Size reached zero
|
||||
* </ul>
|
||||
*
|
||||
* We will clear alarm when
|
||||
* <ul>
|
||||
* <li>Size return to normal
|
||||
* </ul>
|
||||
*
|
||||
* Normal size is reset to max non-zero partition count since the beginning
|
||||
* of the hour.
|
||||
*
|
||||
* @param clusterNodeUrls
|
||||
* @param callback
|
||||
*/
|
||||
public synchronized void checkClusterInformation(List<String> clusterNodeUrls, ClusterMonitorCallback callback) {
|
||||
LOG.debug("Cluster monitor start for {}: last normal size {}, last nodes {}, current nodes {}", this.serviceName,
|
||||
this.normalClusterSize, this.lastNodeUrl, clusterNodeUrls);
|
||||
Instant currentTime = Instant.now();
|
||||
if (lastResetTime == null) {
|
||||
this.lastResetTime = currentTime;
|
||||
this.lastNodeUrl = new TreeSet<>(clusterNodeUrls);
|
||||
this.normalClusterSize = this.lastNodeUrl.size();
|
||||
|
||||
// raise issue if cluster size is empty
|
||||
if ((0 == this.normalClusterSize) && !this.issueRaised) {
|
||||
LOG.warn("Raise cluster issue for {}, empty cluster", this.serviceName);
|
||||
this.issueRaised = callback.raiseIssue(this.serviceName,
|
||||
BaseJsonModel.toPrettyJsonString(new ClusterMonitorDetails(null, null, lastNodeUrl)));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
Set<String> currentNodeUrl = new TreeSet<>(clusterNodeUrls);
|
||||
if (needResetNormal(currentTime)) {
|
||||
if (!currentNodeUrl.isEmpty()) {
|
||||
// only reset if current size is not empty
|
||||
this.normalClusterSize = currentNodeUrl.size();
|
||||
this.lastResetTime = currentTime;
|
||||
}
|
||||
} else if (currentNodeUrl.size() > this.normalClusterSize) {
|
||||
this.normalClusterSize = currentNodeUrl.size();
|
||||
this.lastResetTime = currentTime;
|
||||
}
|
||||
|
||||
if (currentNodeUrl.size() < this.normalClusterSize) {
|
||||
// size reduced
|
||||
if (!issueRaised) {
|
||||
LOG.warn("Raise cluster issue for {}, cluster size below normal size {}: {}", this.serviceName,
|
||||
this.normalClusterSize, currentNodeUrl);
|
||||
this.issueRaised = callback.raiseIssue(this.serviceName,
|
||||
BaseJsonModel.toPrettyJsonString(
|
||||
new ClusterMonitorDetails((0 == this.normalClusterSize) ? null : this.normalClusterSize,
|
||||
this.lastNodeUrl, currentNodeUrl)));
|
||||
}
|
||||
} else {
|
||||
if (!issueRaised && currentNodeUrl.isEmpty()) {
|
||||
LOG.warn("Raise cluster issue for {}, empty cluster size below normal size {}", this.serviceName,
|
||||
this.normalClusterSize);
|
||||
this.issueRaised = callback.raiseIssue(this.serviceName,
|
||||
BaseJsonModel.toPrettyJsonString(
|
||||
new ClusterMonitorDetails((0 == this.normalClusterSize) ? null : this.normalClusterSize,
|
||||
this.lastNodeUrl, currentNodeUrl)));
|
||||
} else if (issueRaised) {
|
||||
// normal, check if we need to clear issue
|
||||
LOG.info("Clear cluster issue for {}, cluster size restored to normal size {}: {}", this.serviceName,
|
||||
this.normalClusterSize, currentNodeUrl);
|
||||
this.issueRaised = !callback.clearIssue(this.serviceName,
|
||||
BaseJsonModel.toPrettyJsonString(
|
||||
new ClusterMonitorDetails((0 == this.normalClusterSize) ? null : this.normalClusterSize,
|
||||
this.lastNodeUrl, currentNodeUrl)));
|
||||
}
|
||||
}
|
||||
this.lastNodeUrl = currentNodeUrl;
|
||||
|
||||
LOG.debug("Cluster monitor ended for {}: normal size {}, current nodes {}", this.serviceName,
|
||||
this.normalClusterSize, this.lastNodeUrl);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if currentTime crossed hour boundary of the {@link #lastResetTime}
|
||||
*
|
||||
* @param currentTime
|
||||
* @return true if hour boundary crossed
|
||||
*/
|
||||
private boolean needResetNormal(Instant currentTime) {
|
||||
LocalDateTime lastResetDate = LocalDateTime.ofInstant(this.lastResetTime, ZoneOffset.UTC);
|
||||
LocalDateTime currentDate = LocalDateTime.ofInstant(currentTime, ZoneOffset.UTC);
|
||||
|
||||
return (currentDate.isAfter(lastResetDate) && (lastResetDate.getHour() != currentDate.getHour()));
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,123 @@
|
||||
package com.telecominfraproject.wlan.partitioner;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
|
||||
import com.hazelcast.core.HazelcastInstance;
|
||||
import com.hazelcast.core.ILock;
|
||||
import com.hazelcast.core.IMap;
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
|
||||
/**
|
||||
* @author dtop
|
||||
*
|
||||
*/
|
||||
public class DynamicServicePartitioner implements ServicePartitionerInterface {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(DynamicServicePartitioner.class);
|
||||
|
||||
protected final String mapName;
|
||||
protected final String serviceName;
|
||||
protected int totalNumberOfPartitions;
|
||||
protected int currentPartition;
|
||||
protected final HazelcastInstance hazelcastInstance;
|
||||
private ClusterMonitorCallback clusterMonitorCallback;
|
||||
protected List<String> clusterNodeUrls = Collections.emptyList();
|
||||
|
||||
private DynamicPartitionMonitorData monitorData;
|
||||
|
||||
public DynamicServicePartitioner(String serviceName, HazelcastInstance hazelcastInstance) {
|
||||
this.serviceName = serviceName;
|
||||
this.mapName = this.serviceName + "_partitioner_map";
|
||||
this.hazelcastInstance = hazelcastInstance;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getTotalNumberOfPartitions() {
|
||||
return totalNumberOfPartitions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getCurrentPartition() {
|
||||
return currentPartition;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void monitorCluster(ClusterMonitorCallback clusterMonitorCallback) {
|
||||
this.clusterMonitorCallback = clusterMonitorCallback;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getNodeUrls() {
|
||||
if (clusterNodeUrls == null || clusterNodeUrls.isEmpty()) {
|
||||
refreshCluster();
|
||||
}
|
||||
return clusterNodeUrls;
|
||||
}
|
||||
|
||||
@Scheduled(fixedDelay = 5 * 60 * 1000)
|
||||
public void refreshCluster() {
|
||||
IMap<String, byte[]> partitionerMap = hazelcastInstance.getMap(mapName);
|
||||
|
||||
// lock the whole partitionerMap while refresh is happening
|
||||
ILock mapLock = hazelcastInstance.getLock("lock_" + mapName);
|
||||
mapLock.lock();
|
||||
|
||||
try {
|
||||
clusterNodeUrls = new ArrayList<>();
|
||||
for (byte[] entryBytes : partitionerMap.values()) {
|
||||
ServicePartitionerNodeInfo ni = BaseJsonModel.fromZippedBytes(entryBytes,
|
||||
ServicePartitionerNodeInfo.class);
|
||||
clusterNodeUrls.add("https://" + ni.getNodeHostName() + ":" + ni.getNodePort());
|
||||
}
|
||||
|
||||
// sort the urls
|
||||
clusterNodeUrls.sort(null);
|
||||
|
||||
processNodeUrls(partitionerMap);
|
||||
|
||||
if (totalNumberOfPartitions != clusterNodeUrls.size()) {
|
||||
LOG.info("In partitioned service {} total number of partitions changed from {} to {}", serviceName,
|
||||
totalNumberOfPartitions, clusterNodeUrls.size());
|
||||
}
|
||||
|
||||
totalNumberOfPartitions = clusterNodeUrls.size();
|
||||
|
||||
} finally {
|
||||
mapLock.unlock();
|
||||
monitorClusterStatus();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Default implementation does nothing; subclasses may override to process the node registrations.
|
||||
*
|
||||
* @param partitionerMap
|
||||
*/
|
||||
protected void processNodeUrls(IMap<String, byte[]> partitionerMap) {
|
||||
// allow subclass to process the node
|
||||
}
|
||||
|
||||
/**
|
||||
* Check the cluster.
|
||||
*/
|
||||
private void monitorClusterStatus() {
|
||||
// make a copy in case it changes
|
||||
ClusterMonitorCallback callback = clusterMonitorCallback;
|
||||
if (callback == null) {
|
||||
return;
|
||||
}
|
||||
synchronized (this) {
|
||||
if (this.monitorData == null) {
|
||||
this.monitorData = new DynamicPartitionMonitorData(this.serviceName);
|
||||
}
|
||||
this.monitorData.checkClusterInformation(clusterNodeUrls, callback);
|
||||
}
|
||||
}
|
||||
}
|
||||
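A hedged usage sketch for DynamicServicePartitioner: construct it against a HazelcastInstance (normally supplied by the common-hazelcast module) and read the current node URLs. The service name is an assumption. A client that wants cluster-size alarms would additionally call monitorCluster(...) with a ClusterMonitorCallback such as the logging sketch shown earlier.

import java.util.List;

import com.hazelcast.core.HazelcastInstance;
import com.telecominfraproject.wlan.partitioner.DynamicServicePartitioner;

// Sketch only: "exampleService" is an illustrative service name; the Hazelcast instance
// would normally come from the common-hazelcast configuration.
public class DynamicPartitionerExample {

    public static List<String> clusterNodes(HazelcastInstance hazelcastInstance) {
        DynamicServicePartitioner partitioner =
                new DynamicServicePartitioner("exampleService", hazelcastInstance);
        // the first call triggers a refresh when no nodes are cached yet
        return partitioner.getNodeUrls();
    }
}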
@@ -0,0 +1,79 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.partitioner;
|
||||
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
|
||||
import com.hazelcast.core.HazelcastInstance;
|
||||
import com.hazelcast.core.IMap;
|
||||
|
||||
/**
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
public class DynamicServiceServerPartitioner extends DynamicServicePartitioner
|
||||
implements ServicePartitionerServerInterface {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(DynamicServiceServerPartitioner.class);
|
||||
|
||||
private final String nodeHostName;
|
||||
private final Integer nodePort;
|
||||
private final long nodeStartupTime;
|
||||
private volatile boolean nodeNeedsToRegister;
|
||||
|
||||
public DynamicServiceServerPartitioner(String serviceName, String nodeHostName, Integer nodePort,
|
||||
HazelcastInstance hazelcastInstance, ApplicationContext applicationContext) {
|
||||
super(serviceName, hazelcastInstance);
|
||||
this.nodeHostName = nodeHostName;
|
||||
this.nodePort = nodePort;
|
||||
this.nodeStartupTime = applicationContext.getStartupDate();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addCurrentNodeToTheCluster() {
|
||||
nodeNeedsToRegister = true;
|
||||
refreshCluster();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void processNodeUrls(IMap<String, byte[]> partitionerMap) {
|
||||
if (nodeNeedsToRegister) {
|
||||
ServicePartitionerNodeInfo nodeInfo = new ServicePartitionerNodeInfo();
|
||||
nodeInfo.setNodeHostName(nodeHostName);
|
||||
nodeInfo.setNodePort(nodePort);
|
||||
nodeInfo.setStartupTime(nodeStartupTime);
|
||||
// compute partitionId from existing cluster info.
|
||||
|
||||
// find index of the current node in the sorted nodeUrls
|
||||
int newPartition = clusterNodeUrls.indexOf("https://" + nodeHostName + ":" + nodePort);
|
||||
if (newPartition < 0) {
|
||||
// current node is not in the list yet. will add it, re-sort
|
||||
// and recalculate newPartition
|
||||
clusterNodeUrls.add("https://" + nodeHostName + ":" + nodePort);
|
||||
// re-sort
|
||||
clusterNodeUrls.sort(null);
|
||||
newPartition = clusterNodeUrls.indexOf("https://" + nodeHostName + ":" + nodePort);
|
||||
currentPartition = newPartition;
|
||||
LOG.info(
|
||||
"In partitioned service {} registering current node {}:{} for partition {}. Total number of partitions {}",
|
||||
serviceName, nodeHostName, nodePort, currentPartition, clusterNodeUrls.size());
|
||||
|
||||
}
|
||||
|
||||
if (newPartition != currentPartition) {
|
||||
LOG.info(
|
||||
"In partitioned service {} current node {}:{} changed partition from {} to {}. Total number of partitions {}",
|
||||
serviceName, nodeHostName, nodePort, currentPartition, newPartition, clusterNodeUrls.size());
|
||||
currentPartition = newPartition;
|
||||
}
|
||||
nodeInfo.setPartitionId(currentPartition);
|
||||
|
||||
// update node registration record in the hazelcast
|
||||
partitionerMap.put(nodeInfo.getNodeHostName(), nodeInfo.toZippedBytes(), 10, TimeUnit.MINUTES);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,36 @@
|
||||
package com.telecominfraproject.wlan.partitioner;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Service partitioner can be used on both server and client sides. When used on
|
||||
* the server side it maintains current partition and total number of
|
||||
* partitions, allows current node to be registered with the cluster and
|
||||
* periodically checks for cluster partition changes. When used on the client
|
||||
* side it maintains a list of all server node urls.
|
||||
*
|
||||
* @author dtop
|
||||
*
|
||||
*/
|
||||
public interface ServicePartitionerInterface {
|
||||
|
||||
int getTotalNumberOfPartitions();
|
||||
|
||||
int getCurrentPartition();
|
||||
|
||||
/**
|
||||
* This method is used by the client side, which needs to monitor the remote
|
||||
* cluster.
|
||||
*
|
||||
* @param clusterMonitorCallback
|
||||
*/
|
||||
void monitorCluster(ClusterMonitorCallback clusterMonitorCallback);
|
||||
|
||||
/**
|
||||
* This method is mostly used by the client side of the service to route and
|
||||
* broadcast messages.
|
||||
*
|
||||
* @return list of urls of all the nodes known to partitioned service
|
||||
*/
|
||||
List<String> getNodeUrls();
|
||||
}
|
||||
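One common way to use the two partition accessors on the server side is to hash an entity key onto a partition and only act when it lands on the local one; the mapping below is an illustration, not something the interface prescribes.

import com.telecominfraproject.wlan.partitioner.ServicePartitionerInterface;

// Sketch only: one possible key-to-partition mapping, not prescribed by the interface.
public class PartitionRouting {

    public static boolean ownedByThisNode(ServicePartitionerInterface partitioner, long customerId) {
        int total = partitioner.getTotalNumberOfPartitions();
        if (total <= 0) {
            // cluster membership not known yet
            return false;
        }
        int partition = (int) Math.floorMod(customerId, (long) total);
        return partition == partitioner.getCurrentPartition();
    }
}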
@@ -0,0 +1,48 @@
|
||||
package com.telecominfraproject.wlan.partitioner;
|
||||
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
|
||||
/**
|
||||
* @author dtop
|
||||
*
|
||||
*/
|
||||
public class ServicePartitionerNodeInfo extends BaseJsonModel {
|
||||
private static final long serialVersionUID = -5007799853648354860L;
|
||||
private String nodeHostName;
|
||||
private Integer nodePort;
|
||||
private long startupTime;
|
||||
private int partitionId;
|
||||
|
||||
public long getStartupTime() {
|
||||
return startupTime;
|
||||
}
|
||||
|
||||
public void setStartupTime(long startupTime) {
|
||||
this.startupTime = startupTime;
|
||||
}
|
||||
|
||||
public int getPartitionId() {
|
||||
return partitionId;
|
||||
}
|
||||
|
||||
public void setPartitionId(int partitionId) {
|
||||
this.partitionId = partitionId;
|
||||
}
|
||||
|
||||
public String getNodeHostName() {
|
||||
return nodeHostName;
|
||||
}
|
||||
|
||||
public void setNodeHostName(String nodeHostName) {
|
||||
this.nodeHostName = nodeHostName;
|
||||
}
|
||||
|
||||
public Integer getNodePort() {
|
||||
return nodePort;
|
||||
}
|
||||
|
||||
public void setNodePort(Integer nodePort) {
|
||||
this.nodePort = nodePort;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,10 @@
|
||||
package com.telecominfraproject.wlan.partitioner;
|
||||
|
||||
public interface ServicePartitionerServerInterface extends ServicePartitionerInterface {
|
||||
|
||||
/**
|
||||
* This method is used by the server side of the service to register itself
|
||||
* with the cluster.
|
||||
*/
|
||||
void addCurrentNodeToTheCluster();
|
||||
}
|
||||
@@ -0,0 +1,51 @@
|
||||
package com.telecominfraproject.wlan.partitioner;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* @author dtop
|
||||
*
|
||||
*/
|
||||
public class StaticServicePartitioner implements ServicePartitionerInterface {
|
||||
|
||||
private final String serviceName;
|
||||
private final int totalNumberOfPartitions;
|
||||
private final int currentPartition;
|
||||
|
||||
public StaticServicePartitioner(String serviceName) {
|
||||
this(serviceName, serviceName + ".numPartitions", serviceName + ".currentPartition");
|
||||
}
|
||||
|
||||
public StaticServicePartitioner(String serviceName, String totalNumberOfPartitionsPropName,
|
||||
String currentPartitionPropName) {
|
||||
this.serviceName = serviceName;
|
||||
this.totalNumberOfPartitions = Integer.getInteger(totalNumberOfPartitionsPropName, 1);
|
||||
this.currentPartition = Integer.getInteger(currentPartitionPropName, 0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getTotalNumberOfPartitions() {
|
||||
return totalNumberOfPartitions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getCurrentPartition() {
|
||||
return currentPartition;
|
||||
}
|
||||
|
||||
public String getServiceName() {
|
||||
return serviceName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getNodeUrls() {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void monitorCluster(ClusterMonitorCallback clusterMonitorCallback) {
|
||||
// DO NOTHING for now
|
||||
}
|
||||
|
||||
}
|
||||
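StaticServicePartitioner reads its layout from system properties and falls back to a single partition with index 0; a sketch with an assumed service name, started with -DexampleService.numPartitions=4 -DexampleService.currentPartition=2.

import com.telecominfraproject.wlan.partitioner.StaticServicePartitioner;

// Sketch only: "exampleService" is an illustrative name; the JVM would be started with
//   -DexampleService.numPartitions=4 -DexampleService.currentPartition=2
public class StaticPartitionerExample {
    public static void main(String[] args) {
        StaticServicePartitioner partitioner = new StaticServicePartitioner("exampleService");
        System.out.println(partitioner.getTotalNumberOfPartitions()); // 4 with the properties above, 1 by default
        System.out.println(partitioner.getCurrentPartition());        // 2 with the properties above, 0 by default
    }
}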
29
base-remote-tests/pom.xml
Normal file
@@ -0,0 +1,29 @@
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>com.telecominfraproject.wlan</groupId>
|
||||
<artifactId>tip-wlan-cloud-root-pom</artifactId>
|
||||
<version>0.0.1-SNAPSHOT</version>
|
||||
<relativePath>../../tip-wlan-cloud-root</relativePath>
|
||||
</parent>
|
||||
<artifactId>base-remote-tests</artifactId>
|
||||
<name>base-remote-tests</name>
|
||||
<description>Common classes used by the unit tests of remote service interfaces.</description>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework</groupId>
|
||||
<artifactId>spring-tx</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-test</artifactId>
|
||||
</dependency>
|
||||
<!-- https://mvnrepository.com/artifact/junit/junit -->
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
</project>
|
||||
@@ -0,0 +1,136 @@
|
||||
package com.telecominfraproject.wlan.remote.tests;
|
||||
|
||||
import java.util.HashMap;
|
||||
|
||||
import org.junit.runner.RunWith;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.boot.web.context.WebServerApplicationContext;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.boot.test.context.SpringBootTest.WebEnvironment;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Primary;
|
||||
import org.springframework.core.env.ConfigurableEnvironment;
|
||||
import org.springframework.core.env.MapPropertySource;
|
||||
import org.springframework.core.env.MutablePropertySources;
|
||||
import org.springframework.core.env.Profiles;
|
||||
import org.springframework.test.annotation.DirtiesContext;
|
||||
import org.springframework.test.annotation.DirtiesContext.ClassMode;
|
||||
import org.springframework.test.context.junit4.SpringRunner;
|
||||
import org.springframework.transaction.PlatformTransactionManager;
|
||||
import org.springframework.transaction.TransactionDefinition;
|
||||
import org.springframework.transaction.TransactionException;
|
||||
import org.springframework.transaction.TransactionStatus;
|
||||
import org.springframework.transaction.support.SimpleTransactionStatus;
|
||||
|
||||
import com.telecominfraproject.wlan.server.RemoteTestServer;
|
||||
|
||||
/**
|
||||
* Base class for remote integration tests. Starts an embedded web application
|
||||
* server on a port assigned by the OS.
|
||||
*
|
||||
* Meant to be used with *-in-memory datastores. Uses a simulated
|
||||
* PlatformTransactionManager that does nothing.
|
||||
*
|
||||
* <pre>
|
||||
* <code>
|
||||
* @ActiveProfiles(profiles = { "integration_test", "no_ssl", "http_digest_auth",
|
||||
* "rest-template-single-user-per-service-digest-auth" })
|
||||
* @Import(value = { TestConfiguration.class })
|
||||
* </code>
|
||||
* </pre>
|
||||
*
|
||||
* See
|
||||
* {@link https://docs.spring.io/spring-boot/docs/current/reference/html/boot-features-testing.html}
|
||||
*
|
||||
* @author dtop
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
@RunWith(SpringRunner.class)
|
||||
@SpringBootTest(
|
||||
webEnvironment = WebEnvironment.RANDOM_PORT,
|
||||
classes = { RemoteTestServer.class },
|
||||
value = { "whizcontrol.serviceUser=user", "whizcontrol.servicePassword=password",
|
||||
"whizcontrol.csrf-enabled=false" })
|
||||
@DirtiesContext(classMode = ClassMode.AFTER_CLASS)
|
||||
public abstract class BaseRemoteTest {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(BaseRemoteTest.class);
|
||||
|
||||
private static final String TEST_PROPERTY_SOURCE = BaseRemoteTest.class.getSimpleName();
|
||||
|
||||
@Autowired
|
||||
protected WebServerApplicationContext server;
|
||||
|
||||
@Autowired
|
||||
protected ConfigurableEnvironment env;
|
||||
|
||||
@Value("${local.server.port}")
|
||||
protected String port;
|
||||
|
||||
protected void configureBaseUrl(String propertyName) {
|
||||
if (env.getProperty(propertyName) == null) {
|
||||
if (env.acceptsProfiles(Profiles.of("use_ssl"))) {
|
||||
addProperties(propertyName, "https://localhost:" + server.getWebServer().getPort());
|
||||
} else {
|
||||
addProperties(propertyName, "http://localhost:" + server.getWebServer().getPort());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected void addProperties(String propertyName, Object value) {
|
||||
if (env.getProperty(propertyName) == null) {
|
||||
MutablePropertySources propertySources = env.getPropertySources();
|
||||
MapPropertySource ps = (MapPropertySource) propertySources.get(TEST_PROPERTY_SOURCE);
|
||||
if (null != ps) {
|
||||
ps.getSource().put(propertyName, value);
|
||||
} else {
|
||||
HashMap<String, Object> myMap = new HashMap<>();
|
||||
myMap.put(propertyName, value);
|
||||
propertySources.addFirst(new MapPropertySource(TEST_PROPERTY_SOURCE, myMap));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Configuration
|
||||
// @PropertySource({ "classpath:persistence-${envTarget:dev}.properties" })
|
||||
public static class Config {
|
||||
|
||||
@Bean
|
||||
@Primary
|
||||
public PlatformTransactionManager transactionManager() {
|
||||
PlatformTransactionManager ptm = new PlatformTransactionManager() {
|
||||
|
||||
{
|
||||
LOG.info("*** Using simulated PlatformTransactionManager");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void rollback(TransactionStatus status) throws TransactionException {
|
||||
LOG.info("Simulating Rollback for {}", status);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void commit(TransactionStatus status) throws TransactionException {
|
||||
LOG.info("Simulating Commit for {}", status);
|
||||
}
|
||||
|
||||
@Override
|
||||
public TransactionStatus getTransaction(TransactionDefinition definition) throws TransactionException {
|
||||
LOG.info("Simulating getTransaction for {}", definition);
|
||||
TransactionStatus ts = new SimpleTransactionStatus();
|
||||
return ts;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
return ptm;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
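A hedged sketch of a concrete test extending BaseRemoteTest: it activates the profiles suggested in the class comment and points an assumed client property at the randomly assigned local port. The property name and test body are illustrative.

import org.junit.Test;
import org.springframework.test.context.ActiveProfiles;

import com.telecominfraproject.wlan.remote.tests.BaseRemoteTest;

// Sketch only: "exampleService.baseUrl" is an illustrative property consumed by an assumed remote client.
@ActiveProfiles(profiles = { "integration_test", "no_ssl", "http_digest_auth",
        "rest-template-single-user-per-service-digest-auth" })
public class ExampleServiceRemoteTest extends BaseRemoteTest {

    @Test
    public void testRemoteCall() {
        // make the locally started server reachable to the client under test
        configureBaseUrl("exampleService.baseUrl");
        // ... exercise the remote client here ...
    }
}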
@@ -0,0 +1,22 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.server;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.context.annotation.ComponentScan;
|
||||
|
||||
/**
|
||||
* Used to perform remote testing.
|
||||
*
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
@ComponentScan(basePackages = { "com.whizcontrol" })
|
||||
@EnableAutoConfiguration
|
||||
public class RemoteTestServer {
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(RemoteTestServer.class, args);
|
||||
}
|
||||
}
|
||||
22
base-scheduler/pom.xml
Normal file
@@ -0,0 +1,22 @@
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>com.telecominfraproject.wlan</groupId>
|
||||
<artifactId>tip-wlan-cloud-root-pom</artifactId>
|
||||
<version>0.0.1-SNAPSHOT</version>
|
||||
<relativePath>../../tip-wlan-cloud-root</relativePath>
|
||||
</parent>
|
||||
<artifactId>base-scheduler</artifactId>
|
||||
<name>base-scheduler</name>
|
||||
<description>Components and data structures used by the scheduled jobs.</description>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>com.telecominfraproject.wlan</groupId>
|
||||
<artifactId>base-models</artifactId>
|
||||
<version>${tip-wlan-cloud.release.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
</project>
|
||||
@@ -0,0 +1,316 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.core.scheduler;
|
||||
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnore;
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
import com.telecominfraproject.wlan.core.model.scheduler.TimeWindowValue;
|
||||
import com.telecominfraproject.wlan.core.scheduler.models.JobMap;
|
||||
import com.telecominfraproject.wlan.core.scheduler.models.JobMapStatus;
|
||||
import com.telecominfraproject.wlan.core.scheduler.models.JobSchedule;
|
||||
import com.telecominfraproject.wlan.core.scheduler.models.ScheduleThrottle;
|
||||
import com.telecominfraproject.wlan.core.scheduler.models.ScheduledJob;
|
||||
import com.telecominfraproject.wlan.core.scheduler.models.JobMap.JobDetails;
|
||||
|
||||
/**
|
||||
* Schedule Service
|
||||
*
|
||||
* @param I
|
||||
* Job Id
|
||||
*
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
public abstract class BaseScheduler<I extends Comparable<I>> {
|
||||
|
||||
/**
|
||||
* Frequency to check the {@link #jobMap}
|
||||
*/
|
||||
private static final long MAX_SLEEP_TIME_MS = TimeUnit.MINUTES.toMillis(1);
|
||||
|
||||
private boolean endScheduler = false;
|
||||
/**
|
||||
* Map of scheduled job
|
||||
*/
|
||||
private final JobMap<I> jobMap = new JobMap<>();
|
||||
private final Logger Logger;
|
||||
private Thread runThread;
|
||||
private boolean startScheduler = false;
|
||||
|
||||
private final ScheduleThrottle throttle;
|
||||
|
||||
public BaseScheduler(Logger Logger, final ScheduleThrottle throttle) {
|
||||
if (null != Logger) {
|
||||
this.Logger = Logger;
|
||||
} else {
|
||||
this.Logger = LoggerFactory.getLogger(getClass());
|
||||
}
|
||||
this.throttle = throttle;
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancel a job. It will invoke {@linkplain ScheduledJob#cancel()}
|
||||
*
|
||||
* @param jobId
|
||||
*/
|
||||
public void cancelJob(final I jobId) {
|
||||
Logger.debug("cancelJob({})", jobId);
|
||||
if (null == jobId) {
|
||||
return;
|
||||
}
|
||||
JobDetails<I> jobDetails = this.jobMap.removeJob(jobId);
|
||||
if (null != jobDetails) {
|
||||
jobDetails.getJob().cancel(jobDetails.getSchedule().getJobId(), System.currentTimeMillis(),
|
||||
jobDetails.getSchedule().getTimeWindows());
|
||||
}
|
||||
Logger.debug("cancelJob({}) returns", jobId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the name of the scheduler
|
||||
*
|
||||
* @return
|
||||
*/
|
||||
abstract public String getName();
|
||||
|
||||
/**
|
||||
* Get the job map status
|
||||
*
|
||||
* @return
|
||||
*/
|
||||
public JobMapStatus<I> getStatus() {
|
||||
return this.jobMap.getStatus();
|
||||
}
|
||||
|
||||
public ScheduleThrottle getThrottle() {
|
||||
return throttle;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if schedule thread is running.
|
||||
*
|
||||
* @return
|
||||
*/
|
||||
public boolean isRunning() {
|
||||
Thread thread = this.runThread;
|
||||
return (null != thread) && thread.isAlive();
|
||||
}
|
||||
|
||||
public boolean shutdownScheduler(long timeout) {
|
||||
synchronized (this) {
|
||||
if (!startScheduler) {
|
||||
throw new IllegalStateException("Scheduler not running");
|
||||
}
|
||||
endScheduler = true;
|
||||
}
|
||||
long waitTill = System.currentTimeMillis() + timeout;
|
||||
|
||||
if (null != this.runThread) {
|
||||
for (long currentTime = System.currentTimeMillis(); currentTime < waitTill; currentTime = System
|
||||
.currentTimeMillis()) {
|
||||
if (null != this.runThread) {
|
||||
if (this.runThread.isAlive()) {
|
||||
synchronized (this.jobMap) {
|
||||
this.jobMap.notifyAll();
|
||||
}
|
||||
} else {
|
||||
this.runThread = null;
|
||||
}
|
||||
}
|
||||
|
||||
synchronized (this) {
|
||||
try {
|
||||
if (null == this.runThread) {
|
||||
this.startScheduler = false;
|
||||
break;
|
||||
}
|
||||
this.wait(TimeUnit.SECONDS.toMillis(1));
|
||||
} catch (InterruptedException e) {
|
||||
// do nothing
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return this.startScheduler;
|
||||
}
|
||||
|
||||
/**
|
||||
* Start the scheduler run thread
|
||||
*
|
||||
* @param timeout
|
||||
*
|
||||
* @return true if thread is started
|
||||
*/
|
||||
public boolean startScheduler(long timeout) {
|
||||
synchronized (this) {
|
||||
if (null != this.runThread) {
|
||||
throw new IllegalStateException("Run thread already started");
|
||||
}
|
||||
|
||||
this.runThread = new Thread(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
runScheduleLoopUntilExit();
|
||||
}
|
||||
});
|
||||
}
|
||||
this.runThread.setDaemon(true);
|
||||
this.runThread.setName(getName());
|
||||
long waitTill = System.currentTimeMillis() + timeout;
|
||||
this.runThread.start();
|
||||
|
||||
// wait for start signal
|
||||
for (long currentTime = System.currentTimeMillis(); currentTime < waitTill; currentTime = System
|
||||
.currentTimeMillis()) {
|
||||
synchronized (this) {
|
||||
if (startScheduler) {
|
||||
break;
|
||||
}
|
||||
try {
|
||||
this.wait(TimeUnit.SECONDS.toMillis(1));
|
||||
} catch (InterruptedException exp) {
|
||||
// ignore
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
}
|
||||
return startScheduler;
|
||||
}
|
||||
|
||||
/**
|
||||
* Submit a job to run at the time window.
|
||||
*
|
||||
* When the time comes, it will enqueue the job and then run the
|
||||
* {@linkplain ScheduledJob#runJob(Object, long, TimeWindowValue)}
|
||||
*
|
||||
* @throws IllegalArgumentException
|
||||
* if schedule has empty time window
|
||||
* @throws IllegalStateException
|
||||
* if scheduler is not running
|
||||
* @param schedule
|
||||
* @param job
|
||||
*/
|
||||
public void submitJob(final JobSchedule<I> schedule, final ScheduledJob<I> job) {
|
||||
if (null == this.runThread) {
|
||||
throw new IllegalStateException("Schedule is not running");
|
||||
}
|
||||
if (schedule.getTimeWindows().isEmpty()) {
|
||||
throw new IllegalArgumentException("Empty time window in " + schedule);
|
||||
}
|
||||
this.Logger.debug("submitJob({},{})", schedule, job);
|
||||
this.jobMap.addJob(schedule, job);
|
||||
synchronized (this.jobMap) {
|
||||
this.jobMap.notifyAll();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
final public String toString() {
|
||||
return BaseJsonModel.toJsonString(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Invoked from the scheduler run loop at a fixed interval. It returns the
|
||||
* {@linkplain TimeWindowValue#getBeginTime()} of the first job in the
|
||||
* queue.
|
||||
*
|
||||
* @param currentTime
|
||||
* @return begin time for the first job in the queue
|
||||
*/
|
||||
private Long runScheduledJob(long currentTime) {
|
||||
Long firstStartup = null;
|
||||
JobSchedule<I> timer = jobMap.getFirstTimer();
|
||||
if (null != timer) {
|
||||
firstStartup = timer.getTimeWindows().getBeginTime();
|
||||
if (timer.getTimeWindows().isWithin(currentTime)) {
|
||||
JobDetails<I> jobDetails = jobMap.removeJob(timer);
|
||||
if (null != jobDetails) {
|
||||
try {
|
||||
enqueuJob(jobDetails);
|
||||
} catch (Exception exp) {
|
||||
getLogger().error("enqueuJob({}) failed due to {} exception {}", jobDetails,
|
||||
exp.getClass().getSimpleName(), exp.getLocalizedMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return firstStartup;
|
||||
}
|
||||
|
||||
/**
|
||||
* Run this in a dedicated thread
|
||||
*/
|
||||
private void runScheduleLoopUntilExit() {
|
||||
Logger.info("Starting scheduler run loop thread");
|
||||
synchronized (this) {
|
||||
if (startScheduler) {
|
||||
throw new IllegalStateException("Scheduler already started running");
|
||||
}
|
||||
startScheduler = true;
|
||||
endScheduler = false;
|
||||
this.notifyAll();
|
||||
}
|
||||
try {
|
||||
while (!this.endScheduler) {
|
||||
try {
|
||||
long currentTime = System.currentTimeMillis();
|
||||
if (null != this.getThrottle()) {
|
||||
currentTime = this.getThrottle().ready(currentTime);
|
||||
}
|
||||
Long nextStartupTime = runScheduledJob(currentTime);
|
||||
long waitTime = MAX_SLEEP_TIME_MS;
|
||||
if (null != nextStartupTime) {
|
||||
currentTime = System.currentTimeMillis();
|
||||
if (nextStartupTime <= currentTime) {
|
||||
waitTime = 0;
|
||||
} else if (nextStartupTime < currentTime + MAX_SLEEP_TIME_MS) {
|
||||
waitTime = nextStartupTime - currentTime;
|
||||
}
|
||||
}
|
||||
if (waitTime > 0) {
|
||||
synchronized (this.jobMap) {
|
||||
if (!this.endScheduler) {
|
||||
this.jobMap.wait(waitTime);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (InterruptedException exp) {
|
||||
// ignore it
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
synchronized (this) {
|
||||
if (startScheduler) {
|
||||
startScheduler = false;
|
||||
}
|
||||
}
|
||||
Logger.info("Existing scheduler run loop thread");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Sub-class should remove all the enqueued jobs and call cancel on them
|
||||
*/
|
||||
abstract protected void emptyJobQueue();
|
||||
|
||||
/**
|
||||
* Sub-class should enqueue the job and run it
|
||||
*
|
||||
* @param jobDetails
|
||||
*/
|
||||
abstract protected void enqueuJob(JobDetails<I> jobDetails);
|
||||
|
||||
@JsonIgnore
|
||||
protected Logger getLogger() {
|
||||
return Logger;
|
||||
}
|
||||
}
|
||||
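A minimal sketch of a BaseScheduler subclass: it names itself and runs due jobs inline on the scheduler thread; a real subclass would more likely hand jobs to an executor. The class name and the throttle rate are assumptions.

import java.util.concurrent.TimeUnit;

import com.telecominfraproject.wlan.core.scheduler.BaseScheduler;
import com.telecominfraproject.wlan.core.scheduler.models.JobMap.JobDetails;
import com.telecominfraproject.wlan.core.scheduler.models.ScheduleRateThrottle;

// Sketch only: runs jobs directly on the scheduler thread.
public class InlineScheduler extends BaseScheduler<Long> {

    public InlineScheduler() {
        // null logger falls back to a logger for this class; throttle to 5 jobs/second with a 1 second warm-up
        super(null, new ScheduleRateThrottle(5.0, 1, TimeUnit.SECONDS));
    }

    @Override
    public String getName() {
        return "InlineScheduler";
    }

    @Override
    protected void enqueuJob(JobDetails<Long> jobDetails) {
        jobDetails.getJob().runJob(jobDetails.getSchedule().getJobId(), System.currentTimeMillis(),
                jobDetails.getSchedule().getTimeWindows());
    }

    @Override
    protected void emptyJobQueue() {
        // nothing queued outside the base class in this sketch
    }
}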
@@ -0,0 +1,105 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.core.scheduler.models;
|
||||
|
||||
import java.util.NoSuchElementException;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentSkipListSet;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnore;
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
|
||||
/**
|
||||
* @param I
|
||||
* - Id
|
||||
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
public class JobMap<I extends Comparable<I>> {
|
||||
public static class JobDetails<C extends Comparable<C>> {
|
||||
private final JobSchedule<C> schedule;
|
||||
private final ScheduledJob<C> job;
|
||||
|
||||
public JobDetails(final JobSchedule<C> schedule, final ScheduledJob<C> job) {
|
||||
this.schedule = schedule;
|
||||
this.job = job;
|
||||
}
|
||||
|
||||
public ScheduledJob<C> getJob() {
|
||||
return job;
|
||||
}
|
||||
|
||||
public JobSchedule<C> getSchedule() {
|
||||
return schedule;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return BaseJsonModel.toJsonString(this);
|
||||
}
|
||||
}
|
||||
|
||||
private ConcurrentHashMap<I, JobDetails<I>> jobMap = new ConcurrentHashMap<>();
|
||||
|
||||
/**
|
||||
* Used to determine the upcoming time
|
||||
*/
|
||||
private ConcurrentSkipListSet<JobSchedule<I>> timerSet = new ConcurrentSkipListSet<>();
|
||||
|
||||
public boolean addJob(JobSchedule<I> id, ScheduledJob<I> job) {
|
||||
JobDetails<I> jobDetails = new JobDetails<>(id, job);
|
||||
if (null == this.jobMap.putIfAbsent(id.getJobId(), jobDetails)) {
|
||||
if (!this.timerSet.add(id)) {
|
||||
this.jobMap.remove(id.getJobId(), jobDetails);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@JsonIgnore
|
||||
public JobSchedule<I> getFirstTimer() {
|
||||
try {
|
||||
if (!timerSet.isEmpty()) {
|
||||
return timerSet.first();
|
||||
}
|
||||
} catch (NoSuchElementException exp) {
|
||||
// just in case it's removed after the isEmpty check
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@JsonIgnore
|
||||
public int getSize() {
|
||||
return jobMap.size();
|
||||
}
|
||||
|
||||
public JobDetails<I> removeJob(final I key) {
|
||||
JobDetails<I> details = this.jobMap.remove(key);
|
||||
if (null != details) {
|
||||
this.timerSet.remove(details.getSchedule());
|
||||
}
|
||||
return details;
|
||||
}
|
||||
|
||||
public JobDetails<I> removeJob(final JobSchedule<I> schedule) {
|
||||
boolean result = this.timerSet.remove(schedule);
|
||||
if (result) {
|
||||
return this.jobMap.remove(schedule.getJobId());
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final String toString() {
|
||||
return BaseJsonModel.toJsonString(this);
|
||||
}
|
||||
|
||||
public JobMapStatus<I> getStatus() {
|
||||
return new JobMapStatus<>(getSize(), getFirstTimer());
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,28 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.core.scheduler.models;
|
||||
|
||||
/**
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
public class JobMapStatus<I extends Comparable<I>> {
|
||||
|
||||
private final int size;
|
||||
private final JobSchedule<I> firstTimer;
|
||||
|
||||
public JobMapStatus(int size, JobSchedule<I> firstTimer) {
|
||||
this.size = size;
|
||||
this.firstTimer = firstTimer;
|
||||
}
|
||||
|
||||
public int getSize() {
|
||||
return size;
|
||||
}
|
||||
|
||||
public JobSchedule<I> getFirstTimer() {
|
||||
return firstTimer;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,70 @@
|
||||
package com.telecominfraproject.wlan.core.scheduler.models;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnore;
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
import com.telecominfraproject.wlan.core.model.scheduler.ImmutableTimeWindow;
|
||||
|
||||
public class JobSchedule<T extends Comparable<T>> implements Comparable<JobSchedule<T>> {
|
||||
private final T jobId;
|
||||
private final ImmutableTimeWindow timeWindows;
|
||||
|
||||
public JobSchedule(T jobId, ImmutableTimeWindow timeWindows) {
|
||||
this.jobId = jobId;
|
||||
this.timeWindows = timeWindows;
|
||||
}
|
||||
|
||||
/**
|
||||
* Compare {@link #timeWindows} first, then the {@link #jobId}.
|
||||
*
|
||||
* Used to determine the upcoming schedule.
|
||||
*/
|
||||
@Override
|
||||
public int compareTo(JobSchedule<T> other) {
|
||||
int result = timeWindows.compareTo(other.timeWindows);
|
||||
if (0 == result) {
|
||||
return jobId.compareTo(other.jobId);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
public T getJobId() {
|
||||
return jobId;
|
||||
}
|
||||
|
||||
@JsonIgnore
|
||||
public ImmutableTimeWindow getTimeWindows() {
|
||||
return timeWindows;
|
||||
}
|
||||
|
||||
public String getTimeWindowsDetails() {
|
||||
return (null == timeWindows) ? null : timeWindows.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return BaseJsonModel.toJsonString(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(jobId, timeWindows);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (this == obj) {
|
||||
return true;
|
||||
}
|
||||
if (obj == null) {
|
||||
return false;
|
||||
}
|
||||
if (!(obj instanceof JobSchedule)) {
|
||||
return false;
|
||||
}
|
||||
JobSchedule other = (JobSchedule) obj;
|
||||
return Objects.equals(jobId, other.jobId) && Objects.equals(timeWindows, other.timeWindows);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,63 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.core.scheduler.models;
|
||||
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import com.google.common.util.concurrent.RateLimiter;
|
||||
|
||||
import ch.qos.logback.core.util.Duration;
|
||||
|
||||
/**
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
public class ScheduleRateThrottle implements ScheduleThrottle {
|
||||
private final RateLimiter impl;
|
||||
private final double permitsPerSecond;
|
||||
private final Duration warmupPeroid;
|
||||
|
||||
/**
|
||||
* Use {@link RateLimiter} to provide throttle control.
|
||||
*
|
||||
* See {@linkplain RateLimiter#create(double, long, TimeUnit)}
|
||||
*
|
||||
* @param permitsPerSecond
|
||||
* @param warmupPeriod
|
||||
* @param unit
|
||||
*/
|
||||
public ScheduleRateThrottle(double permitsPerSecond, long warmupPeriod, TimeUnit unit) {
|
||||
this.permitsPerSecond = permitsPerSecond;
|
||||
this.warmupPeroid = Duration.buildByMilliseconds(unit.toMillis(warmupPeriod));
|
||||
this.impl = RateLimiter.create(permitsPerSecond, warmupPeriod, unit);
|
||||
}
|
||||
|
||||
public double getPermitsPerSecond() {
|
||||
return permitsPerSecond;
|
||||
}
|
||||
|
||||
public Duration getWarmupPeroid() {
|
||||
return warmupPeroid;
|
||||
}
|
||||
|
||||
/*
|
||||
* (non-Javadoc)
|
||||
*
|
||||
* @see com.telecominfraproject.wlan.core.scheduler.models.ScheduleThrottle#ready(long)
|
||||
*/
|
||||
@Override
|
||||
public long ready(long currentTime) {
|
||||
if (this.impl.tryAcquire()) {
|
||||
return System.currentTimeMillis();
|
||||
}
|
||||
return currentTime;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return new StringBuilder().append("{\"_type\":\"").append(this.getClass().getSimpleName())
|
||||
.append("\",\"permitsPerSecond\":").append(this.permitsPerSecond).append(",\"warmupPeriod\":\"")
|
||||
.append(this.warmupPeroid).append("\"}").toString();
|
||||
}
|
||||
}
|
||||
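A short standalone sketch of the throttle above: ready() tries to acquire a permit without blocking and returns a refreshed timestamp when one was available, otherwise it echoes the caller's time. The rate and warm-up values are arbitrary.

import java.util.concurrent.TimeUnit;

import com.telecominfraproject.wlan.core.scheduler.models.ScheduleRateThrottle;
import com.telecominfraproject.wlan.core.scheduler.models.ScheduleThrottle;

// Sketch only: 2 permits/second with a 10 second warm-up.
public class ThrottleExample {
    public static void main(String[] args) {
        ScheduleThrottle throttle = new ScheduleRateThrottle(2.0, 10, TimeUnit.SECONDS);
        long now = System.currentTimeMillis();
        // refreshed timestamp if a permit was immediately available, otherwise the input value
        now = throttle.ready(now);
        System.out.println(now);
    }
}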
@@ -0,0 +1,20 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.core.scheduler.models;
|
||||
|
||||
/**
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
public interface ScheduleThrottle {
|
||||
/**
|
||||
* Controls how fast the scheduler pulls jobs off the JobMap
|
||||
*
|
||||
* @param currentTime
|
||||
* when throttle is called
|
||||
* @return the time to use after throttling; if the throttle paused, the refreshed value from
|
||||
* {@linkplain System#currentTimeMillis()}
|
||||
*/
|
||||
public long ready(long currentTime);
|
||||
}
|
||||
@@ -0,0 +1,43 @@
|
||||
/**
|
||||
*
|
||||
*/
|
||||
package com.telecominfraproject.wlan.core.scheduler.models;
|
||||
|
||||
import com.telecominfraproject.wlan.core.model.scheduler.ImmutableTimeWindow;
|
||||
|
||||
/**
|
||||
* @author yongli
|
||||
*
|
||||
*/
|
||||
public interface ScheduledJob<I> {
|
||||
|
||||
/**
|
||||
* Run the job. Method should check startTime against the timeWindows to
|
||||
* decide if it should proceed.
|
||||
*
|
||||
* @param id
|
||||
* @param startTime
|
||||
* - time when run is invoked
|
||||
* @param timeWindows
|
||||
* - time windows when the job is scheduled to run
|
||||
*/
|
||||
public void runJob(final I id, long startTime, final ImmutableTimeWindow timeWindows);
|
||||
|
||||
/**
|
||||
* Signal the job is cancelled.
|
||||
*
|
||||
* @param id
|
||||
* @param startTime
|
||||
* - time when run is invoked
|
||||
* @param immutableTimeWindow
|
||||
* - time windows when the job is scheduled to run
|
||||
*/
|
||||
public void cancel(final I id, long startTime, final ImmutableTimeWindow immutableTimeWindow);
|
||||
|
||||
/**
|
||||
* Test if the job is cancelled.
|
||||
*
|
||||
* @return
|
||||
*/
|
||||
public boolean isCancelled();
|
||||
}
|
||||
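Tying the scheduler pieces together: a job is described by a JobSchedule (id plus ImmutableTimeWindow) and a ScheduledJob implementation, then passed to a running BaseScheduler subclass (such as the InlineScheduler sketched earlier) via submitJob, e.g. scheduler.submitJob(OneShotJob.scheduleNow(1L), new OneShotJob()). The one-minute window and class name are assumptions.

import java.util.concurrent.TimeUnit;

import com.telecominfraproject.wlan.core.model.scheduler.ImmutableTimeWindow;
import com.telecominfraproject.wlan.core.scheduler.models.JobSchedule;
import com.telecominfraproject.wlan.core.scheduler.models.ScheduledJob;

// Sketch only: a job that becomes eligible now and expires in one minute.
public class OneShotJob implements ScheduledJob<Long> {

    @Override
    public void runJob(Long id, long startTime, ImmutableTimeWindow timeWindows) {
        System.out.println("running job " + id + " at " + startTime);
    }

    @Override
    public void cancel(Long id, long startTime, ImmutableTimeWindow immutableTimeWindow) {
        System.out.println("job " + id + " cancelled");
    }

    @Override
    public boolean isCancelled() {
        return false;
    }

    public static JobSchedule<Long> scheduleNow(long jobId) {
        long now = System.currentTimeMillis();
        return new JobSchedule<>(jobId, new ImmutableTimeWindow(now, now + TimeUnit.MINUTES.toMillis(1)));
    }
}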
@@ -0,0 +1,229 @@
|
||||
package com.telecominfraproject.wlan.core.scheduler;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.junit.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
|
||||
import com.telecominfraproject.wlan.core.model.scheduler.EpochTimeWindow;
|
||||
import com.telecominfraproject.wlan.core.model.scheduler.ImmutableTimeWindow;
|
||||
import com.telecominfraproject.wlan.core.scheduler.BaseScheduler;
|
||||
import com.telecominfraproject.wlan.core.scheduler.models.JobSchedule;
|
||||
import com.telecominfraproject.wlan.core.scheduler.models.ScheduleRateThrottle;
|
||||
import com.telecominfraproject.wlan.core.scheduler.models.ScheduledJob;
|
||||
import com.telecominfraproject.wlan.core.scheduler.models.JobMap.JobDetails;
|
||||
|
||||
public class SchedulerTests {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(SchedulerTests.class);
|
||||
|
||||
public static interface TimeWindowTestFactory<I> {
|
||||
/**
|
||||
* create a time window
|
||||
*
|
||||
* @param jobId
|
||||
* @return
|
||||
*/
|
||||
ImmutableTimeWindow create(I jobId);
|
||||
|
||||
Long getTestEnd();
|
||||
|
||||
void clearResult();
|
||||
|
||||
/**
|
||||
* Set up total test and test interval
|
||||
*
|
||||
* @param now
|
||||
* @param total
|
||||
* @param interval
|
||||
*/
|
||||
void setTestInterval(long now, long total, long interval);
|
||||
|
||||
/**
|
||||
* verify the jobDetails
|
||||
*
|
||||
* @param jobDetails
|
||||
*/
|
||||
void verify(JobDetails<Long> jobDetails);
|
||||
|
||||
void verifyTotal();
|
||||
};
|
||||
|
||||
public static class TestScheduler extends BaseScheduler<Long> {
|
||||
private TimeWindowTestFactory<Long> factory;
|
||||
|
||||
public TestScheduler(TimeWindowTestFactory<Long> factory) {
|
||||
// 500 microsecond warm-up (effectively none) and a 1.1 permits/second rate
|
||||
super(LOG, new ScheduleRateThrottle(1.1, 500, TimeUnit.MICROSECONDS));
|
||||
this.factory = factory;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void enqueuJob(JobDetails<Long> jobDetails) {
|
||||
LOG.debug("enqueuJob {}", jobDetails);
|
||||
assertNotNull(jobDetails);
|
||||
factory.verify(jobDetails);
|
||||
ScheduledJob<Long> job = jobDetails.getJob();
|
||||
JobSchedule<Long> schedule = jobDetails.getSchedule();
|
||||
job.runJob(schedule.getJobId(), System.currentTimeMillis(), schedule.getTimeWindows());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void emptyJobQueue() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return getClass().getSimpleName();
|
||||
}
|
||||
}
|
||||
|
||||
public static class TestJob implements ScheduledJob<Long> {
|
||||
private final Long jobId;
|
||||
|
||||
public TestJob(long jobId) {
|
||||
this.jobId = jobId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void runJob(Long id, long startTime, final ImmutableTimeWindow timeWindows) {
|
||||
assertNotNull(id);
|
||||
assertEquals(jobId, id);
|
||||
assertNotNull(timeWindows);
|
||||
assertTrue(timeWindows.isWithin(System.currentTimeMillis()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void cancel(Long id, long startTime, ImmutableTimeWindow timeWindows) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCancelled() {
|
||||
return false;
|
||||
}
|
||||
|
||||
public Long getJobId() {
|
||||
return jobId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return BaseJsonModel.toJsonString(this);
|
||||
}
|
||||
}
|
||||
|
||||
private static final int MAX_LOOP = 10;
|
||||
|
||||
@Test
|
||||
public void testBasic() {
|
||||
/**
|
||||
* Create a reverse order test factory
|
||||
*/
|
||||
TimeWindowTestFactory<Long> factory = new TimeWindowTestFactory<Long>() {
|
||||
private Long testEnd;
|
||||
private Long total;
|
||||
private Long interval;
|
||||
private Long lastJobId;
|
||||
private Long verified;
|
||||
|
||||
@Override
|
||||
public void setTestInterval(long now, long total, long interval) {
|
||||
this.testEnd = now + (total + 1) * 2 * interval;
|
||||
this.total = total;
|
||||
this.interval = interval;
|
||||
this.lastJobId = null;
|
||||
this.verified = 0L;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ImmutableTimeWindow create(Long jobId) {
|
||||
assertNotNull(testEnd);
|
||||
assertTrue(jobId < total);
|
||||
long end = testEnd - jobId * 2 * interval;
|
||||
long begin = end - interval;
|
||||
return new ImmutableTimeWindow(begin, end);
|
||||
}
|
||||
|
||||
private void verify(Long jobId, ImmutableTimeWindow value) {
|
||||
ImmutableTimeWindow expected = create(jobId);
|
||||
assertEquals(0, EpochTimeWindow.compare(expected, value));
|
||||
}
|
||||
|
||||
private void verify(Long jobId) {
|
||||
assertNotNull(jobId);
|
||||
if (null != lastJobId) {
|
||||
// ensure reverse order
|
||||
assertTrue(jobId < lastJobId);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void verify(JobDetails<Long> jobDetails) {
|
||||
assertNotNull(jobDetails);
|
||||
JobSchedule<Long> schedule = jobDetails.getSchedule();
|
||||
verify(schedule.getJobId(), schedule.getTimeWindows());
|
||||
verify(schedule.getJobId());
|
||||
ScheduledJob<Long> job = jobDetails.getJob();
|
||||
assertNotNull(job);
|
||||
++verified;
|
||||
// signal that all jobs have been verified
|
||||
if (this.verified.equals(this.total)) {
|
||||
synchronized (this) {
|
||||
this.notifyAll();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clearResult() {
|
||||
this.lastJobId = null;
|
||||
this.verified = 0L;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long getTestEnd() {
|
||||
return this.testEnd;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void verifyTotal() {
|
||||
assertEquals(total, verified);
|
||||
}
|
||||
};
|
||||
|
||||
TestScheduler scheduler = new TestScheduler(factory);
|
||||
boolean start = scheduler.startScheduler(TimeUnit.SECONDS.toMillis(10));
|
||||
assertTrue(start);
|
||||
factory.setTestInterval(System.currentTimeMillis(), MAX_LOOP, TimeUnit.SECONDS.toMillis(1));
|
||||
factory.clearResult();
|
||||
|
||||
for (int i = 0; i < MAX_LOOP; ++i) {
|
||||
long jobId = i;
|
||||
JobSchedule<Long> schedule = new JobSchedule<Long>(jobId, factory.create(jobId));
|
||||
ScheduledJob<Long> job = new TestJob(jobId);
|
||||
scheduler.submitJob(schedule, job);
|
||||
}
|
||||
|
||||
Long testEndTime = factory.getTestEnd();
|
||||
assertNotNull(testEndTime);
|
||||
LOG.debug("Test started at {}, last end time {}", Instant.now(), Instant.ofEpochMilli(testEndTime));
|
||||
synchronized (factory) {
|
||||
try {
|
||||
factory.wait(testEndTime - System.currentTimeMillis());
|
||||
} catch (InterruptedException e) {
|
||||
}
|
||||
factory.verifyTotal();
|
||||
}
|
||||
|
||||
start = scheduler.shutdownScheduler(TimeUnit.SECONDS.toMillis(10));
|
||||
assertFalse(start);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,226 @@
package com.telecominfraproject.wlan.core.scheduler.models;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import com.telecominfraproject.wlan.core.model.scheduler.ImmutableTimeWindow;
import com.telecominfraproject.wlan.core.scheduler.models.JobMap;
import com.telecominfraproject.wlan.core.scheduler.models.JobSchedule;
import com.telecominfraproject.wlan.core.scheduler.models.ScheduledJob;
import com.telecominfraproject.wlan.core.scheduler.models.JobMap.JobDetails;

public class JobMapTests {

    public static class TestJob implements ScheduledJob<Long> {

        private final Long jobId;

        public TestJob(long jobId) {
            this.jobId = jobId;
        }

        @Override
        public void runJob(Long id, long startTime, final ImmutableTimeWindow timeWindows) {
        }

        @Override
        public void cancel(Long id, long startTime, ImmutableTimeWindow timeWindows) {
        }

        @Override
        public boolean isCancelled() {
            return false;
        }

        public Long getJobId() {
            return jobId;
        }
    }

    public static interface TimeWindowTestFactory {
        /**
         * create a time window
         *
         * @param jobId
         * @return
         */
        ImmutableTimeWindow create(long jobId);

        /**
         * verify the time window
         *
         * @param jobId
         * @param value
         */
        void verify(long jobId, final ImmutableTimeWindow value);

        /**
         * verify the jobId order
         *
         * @param lastJobId
         * @param jobId
         */
        void verify(Long lastJobId, Long jobId);
    }

    private static final int MAX_JOB = 10;

    private JobMap<Long> jobMap;
    private final AtomicLong jobId = new AtomicLong(10L);

    @Before
    public void setup() {
        this.jobMap = new JobMap<>();
    }

    @After
    public void finish() {
        this.jobMap = null;
    }

    @Test
    public void testScheduleOrdered() {
        // same order w.r.t jobId, same duration
        TimeWindowTestFactory factory = new TimeWindowTestFactory() {

            @Override
            public void verify(long jobId, ImmutableTimeWindow value) {
                assertNotNull(value);
                assertEquals(jobId, value.getBeginTime());
                assertEquals(jobId + TimeUnit.MINUTES.toMillis(1), value.getEndTime());
            }

            @Override
            public ImmutableTimeWindow create(long jobId) {
                return new ImmutableTimeWindow(jobId, jobId + TimeUnit.MINUTES.toMillis(1));
            }

            @Override
            public void verify(Long lastJobId, Long jobId) {
                // increasing order
                if (null != lastJobId) {
                    assertTrue(lastJobId < jobId);
                }
            }
        };
        testSchelder(factory);
    }

    @Test
    public void testScheduleReverse() {
        // reverse order w.r.t jobId
        TimeWindowTestFactory factory = new TimeWindowTestFactory() {

            @Override
            public void verify(long jobId, ImmutableTimeWindow value) {
                assertNotNull(value);
                assertEquals(0 - TimeUnit.MINUTES.toMillis(jobId), value.getBeginTime());
                assertEquals(0L, value.getEndTime());
            }

            @Override
            public ImmutableTimeWindow create(long jobId) {
                return new ImmutableTimeWindow(0 - TimeUnit.MINUTES.toMillis(jobId), 0);
            }

            @Override
            public void verify(Long lastJobId, Long jobId) {
                // decreasing order
                if (null != lastJobId) {
                    assertTrue(lastJobId > jobId);
                }
            }
        };
        testSchelder(factory);
    }

    @Test
    public void testScheduleExpanding() {
        // all start at same time, with expanding duration
        TimeWindowTestFactory factory = new TimeWindowTestFactory() {

            @Override
            public void verify(long jobId, ImmutableTimeWindow value) {
                assertNotNull(value);
                assertEquals(0, value.getBeginTime());
                assertEquals(TimeUnit.MINUTES.toMillis(jobId), value.getEndTime());
            }

            @Override
            public ImmutableTimeWindow create(long jobId) {
                return new ImmutableTimeWindow(0, TimeUnit.MINUTES.toMillis(jobId));
            }

            @Override
            public void verify(Long lastJobId, Long jobId) {
                // increasing order
                if (null != lastJobId) {
                    assertTrue(lastJobId < jobId);
                }
            }
        };
        testSchelder(factory);
    }

    private void testSchelder(TimeWindowTestFactory factory) {
        SortedSet<Long> jobIds = createJobs(MAX_JOB, factory);
        assertEquals(MAX_JOB, this.jobMap.getSize());

        Long lastJobId = null;
        for (int i = 0; i < MAX_JOB + 1; ++i) {
            JobSchedule<Long> timer = this.jobMap.getFirstTimer();
            if (jobIds.isEmpty()) {
                assertNull(timer);
                break;
            }
            assertNotNull(timer);
            JobDetails<Long> details = this.jobMap.removeJob(timer);
            assertNotNull(details);
            JobSchedule<Long> schedule = details.getSchedule();
            assertNotNull(schedule);
            assertNotNull(schedule.getJobId());
            factory.verify(lastJobId, schedule.getJobId());
            lastJobId = schedule.getJobId();
            assertTrue(jobIds.remove(lastJobId));
            ImmutableTimeWindow timeWindows = schedule.getTimeWindows();
            factory.verify(lastJobId, timeWindows);
            ScheduledJob<Long> job = details.getJob();
            TestJob testJob = TestJob.class.cast(job);
            assertNotNull(testJob);
            assertEquals(lastJobId, testJob.getJobId());
            assertEquals(lastJobId, schedule.getJobId());
        }
        assertEquals(0, this.jobMap.getSize());
        assertTrue(jobIds.isEmpty());
    }

    private SortedSet<Long> createJobs(int maxJob, TimeWindowTestFactory factory) {
        SortedSet<Long> result = new TreeSet<>();
        for (int i = 0; i < maxJob; ++i) {
            long count = jobId.getAndIncrement();
            JobSchedule<Long> id = new JobSchedule<Long>(count, factory.create(count));
            ScheduledJob<Long> job = new TestJob(count);
            boolean add = this.jobMap.addJob(id, job);
            assertTrue(add);

            // a second add of the same schedule must be rejected
            add = this.jobMap.addJob(id, job);
            assertFalse(add);
            result.add(count);
        }
        assertEquals(result.size(), maxJob);
        return result;
    }

}

49
base-scheduler/src/test/resources/logback.xml
Normal file
@@ -0,0 +1,49 @@
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<!-- For assistance related to logback-translator or configuration -->
|
||||
<!-- files in general, please contact the logback user mailing list -->
|
||||
<!-- at http://www.qos.ch/mailman/listinfo/logback-user -->
|
||||
<!-- -->
|
||||
<!-- For professional support please see -->
|
||||
<!-- http://www.qos.ch/shop/products/professionalSupport -->
|
||||
<!-- -->
|
||||
<configuration>
|
||||
<appender name="stdout" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>%d{yyyy-MM-DD HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<!--
|
||||
<appender name="FILE" class="ch.qos.logback.core.FileAppender">
|
||||
<file>myApp.log</file>
|
||||
|
||||
<encoder>
|
||||
<pattern>%date %level [%thread] %logger{10} [%file:%line] %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
-->
|
||||
|
||||
|
||||
<!--
|
||||
details: http://logback.qos.ch/manual/configuration.html#auto_configuration
|
||||
|
||||
runtime configuration, if need to override the defaults:
|
||||
-Dlogback.configurationFile=/path/to/logback.xml
|
||||
|
||||
for log configuration debugging - use
|
||||
-Dlogback.statusListenerClass=ch.qos.logback.core.status.OnConsoleStatusListener
|
||||
|
||||
log levels:
|
||||
OFF ERROR WARN INFO DEBUG TRACE
|
||||
-->
|
||||
<logger name="org.springframework" level="WARN"/>
|
||||
<logger name="org.reflections" level="WARN"/>
|
||||
<logger name="com.whizcontrol" level="${art2waveLogLevel:-DEBUG}"/>
|
||||
<logger name="com.netflix.servo.tag.aws.AwsInjectableTag" level="OFF"/>
|
||||
|
||||
<root level="WARN">
|
||||
<appender-ref ref="stdout"/>
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
16
base-stream-interface/pom.xml
Normal file
@@ -0,0 +1,16 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>com.telecominfraproject.wlan</groupId>
        <artifactId>tip-wlan-cloud-root-pom</artifactId>
        <version>0.0.1-SNAPSHOT</version>
        <relativePath>../../tip-wlan-cloud-root</relativePath>
    </parent>
    <artifactId>base-stream-interface</artifactId>
    <name>base-stream-interface</name>
    <description>Common interface for streaming processing.</description>

    <dependencies>
    </dependencies>

</project>

@@ -0,0 +1,11 @@
package com.telecominfraproject.wlan.stream;


/**
 * @author ekeddy
 *
 */
public interface StreamInterface<T> {

    void publish(T record);
}

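For context, a minimal sketch of how this one-method interface might be implemented, e.g. for unit tests. The InMemoryStreamInterface class and its records list below are hypothetical illustrations, not part of this commit:

package com.telecominfraproject.wlan.stream;

import java.util.ArrayList;
import java.util.List;

/**
 * Hypothetical in-memory StreamInterface implementation: instead of sending
 * records to a real stream, it simply collects everything that was published
 * so a test can assert on it.
 */
public class InMemoryStreamInterface<T> implements StreamInterface<T> {

    private final List<T> records = new ArrayList<>();

    @Override
    public void publish(T record) {
        records.add(record);
    }

    public List<T> getPublishedRecords() {
        return records;
    }
}
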
27
common-hazelcast/pom.xml
Normal file
@@ -0,0 +1,27 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>com.telecominfraproject.wlan</groupId>
        <artifactId>tip-wlan-cloud-root-pom</artifactId>
        <version>0.0.1-SNAPSHOT</version>
        <relativePath>../../tip-wlan-cloud-root</relativePath>
    </parent>
    <artifactId>common-hazelcast</artifactId>
    <name>common-hazelcast</name>
    <description>Classes that are common between Hazelcast client and Hazelcast nodes, like listeners, item processors, etc.</description>
    <dependencies>
        <dependency>
            <groupId>com.hazelcast</groupId>
            <artifactId>hazelcast</artifactId>
        </dependency>

        <!-- to get access to SystemAndEnvPropertyResolver utility class -->
        <dependency>
            <groupId>com.telecominfraproject.wlan</groupId>
            <artifactId>base-models</artifactId>
            <version>${tip-wlan-cloud.release.version}</version>
        </dependency>

    </dependencies>
</project>

@@ -0,0 +1,49 @@
package com.telecominfraproject.wlan.hazelcast.common;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;

import com.hazelcast.map.AbstractEntryProcessor;

/**
 * This class appends an item to a List<Long> stored in a hazelcast map.
 * Usage pattern:
 * <pre>
 * IMap<String, List<Long>> tsMap = hazelcastClient.getMap(tsMapName);
 * tsMap.submitToKey(tsKey, new AppendLongToListEntryProcessor(createdTimestampToAppend)).get();
 * </pre>
 * <b>Very important</b>: this class must implement the Serializable interface because it is submitted to the Hazelcast cluster.
 * @author dtop
 */
public class AppendLongToListEntryProcessor extends AbstractEntryProcessor<String, List<Long>> implements Serializable {
    private static final long serialVersionUID = -6960225265547599510L;

    private long tsToAppend;

    public AppendLongToListEntryProcessor() {
        // for serialization
    }

    public AppendLongToListEntryProcessor(long tsToAppend) {
        this.tsToAppend = tsToAppend;
    }

    @Override
    public Object process(Entry<String, List<Long>> entry) {
        List<Long> value = entry.getValue();

        if (value == null) {
            value = new ArrayList<>();
        }

        // process and modify value
        if (!value.contains(tsToAppend)) {
            value.add(tsToAppend);
        }
        entry.setValue(value);

        return true;
    }
}

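A self-contained sketch of the usage pattern described in the Javadoc above. The map name "hdsCreationTs-example", the key "some-file-key", and the default client configuration are hypothetical; this assumes the Hazelcast 3.x client API that this module compiles against:

package com.telecominfraproject.wlan.hazelcast.common;

import java.util.List;
import java.util.concurrent.ExecutionException;

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;

public class AppendLongToListExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        // connect with default client settings (hazelcast-client.xml / localhost)
        HazelcastInstance hazelcastClient = HazelcastClient.newHazelcastClient();

        IMap<String, List<Long>> tsMap = hazelcastClient.getMap("hdsCreationTs-example");

        // append a timestamp to the list stored under "some-file-key";
        // the entry processor runs on the member that owns that key
        tsMap.submitToKey("some-file-key",
                new AppendLongToListEntryProcessor(System.currentTimeMillis())).get();

        hazelcastClient.shutdown();
    }
}
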
@@ -0,0 +1,48 @@
package com.telecominfraproject.wlan.hazelcast.common;

import java.io.Serializable;
import java.util.HashSet;
import java.util.Map.Entry;
import java.util.Set;

import com.hazelcast.map.AbstractEntryProcessor;

/**
 * This class appends an item to a Set<String> stored in a hazelcast map.
 * Usage pattern:
 * <pre>
 * IMap<String, Set<String>> dirListMap = hazelcastClient.getMap(dirListMapName);
 * dirListMap.submitToKey(dirKey, new AppendStringToSetEntryProcessor(stringToAppend)).get();
 * </pre>
 * <b>Very important</b>: this class must implement the Serializable interface because it is submitted to the Hazelcast cluster.
 * @author dtop
 */
public class AppendStringToSetEntryProcessor extends AbstractEntryProcessor<String, Set<String>> implements Serializable {
    private static final long serialVersionUID = -6960225265547599510L;

    private String stringToAppend;

    public AppendStringToSetEntryProcessor() {
        // for serialization
    }

    public AppendStringToSetEntryProcessor(String stringToAppend) {
        this.stringToAppend = stringToAppend;
    }

    @Override
    public Object process(Entry<String, Set<String>> entry) {
        Set<String> value = entry.getValue();

        if (value == null) {
            value = new HashSet<>();
        }

        // process and modify value
        value.add(stringToAppend);

        entry.setValue(value);

        return true;
    }
}

@@ -0,0 +1,552 @@
package com.telecominfraproject.wlan.hazelcast.common;

import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Configuration;

import com.telecominfraproject.wlan.core.model.utils.SystemAndEnvPropertyResolver;

/**
 * @author dtop
 *
 */
@Configuration
public class HazelcastObjectsConfiguration {

    //
    // map settings for metrics, system events and equipment events
    //
    @Value("${tip.wlan.SystemEventDatastore.hazelcast.numBackups:1}")
    private int systemEventsNumBackups;
    @Value("${tip.wlan.SystemEventDatastore.hazelcast.ttlSeconds:600}")
    private int systemEventsTtlSeconds;
    @Value("${tip.wlan.SystemEventDatastore.hazelcast.mapPrefix:se-}")
    private String systemEventsMapPrefix;

    @Value("${tip.wlan.RawEquipmentEventDatastore.hazelcast.numBackups:1}")
    private int rawEquipmentEventsNumBackups;
    @Value("${tip.wlan.RawEquipmentEventDatastore.hazelcast.ttlSeconds:600}")
    private int rawEquipmentEventsTtlSeconds;
    @Value("${tip.wlan.RawEquipmentEventDatastore.hazelcast.mapPrefix:ree-}")
    private String rawEquipmentEventsMapPrefix;

    @Value("${tip.wlan.ServiceMetricsDatastore.hazelcast.numBackups:1}")
    private int serviceMetricsNumBackups;
    @Value("${tip.wlan.ServiceMetricsDatastore.hazelcast.ttlSeconds:600}")
    private int serviceMetricsTtlSeconds;
    @Value("${tip.wlan.ServiceMetricsDatastore.hazelcast.mapPrefix:sm-}")
    private String serviceMetricsMapPrefix;

    // queues for managing rule engine agent queues
    @Value("${tip.wlan.ruleAgent.hazelcast.queuePrefix:re-q-}")
    private String ruleAgentQueuePrefix;
    @Value("${tip.wlan.ruleAgent.hazelcast.queue.numBackups:1}")
    private int ruleAgentQueueNumBackups;
    @Value("${tip.wlan.ruleAgent.hazelcast.queueMaxSize:10000}")
    private int ruleAgentQueueMaxSize;

    @Value("${tip.wlan.ruleAgent.hazelcast.unassignedRuleAgentQueuesQ:unassigned-re-queues-q}")
    private String unassignedRuleAgentQueuesQName;
    @Value("${tip.wlan.ruleAgent.hazelcast.unassignedRuleAgentQueuesQ.numBackups:1}")
    private int unassignedRuleAgentQueuesQNumBackups;

    // maps for managing rule engine agent queues
    @Value("${tip.wlan.ruleAgent.hazelcast.ruleEngineProcessMap:re-process-map}")
    private String ruleAgentProcessMapName;
    @Value("${tip.wlan.ruleAgent.hazelcast.ruleEngineProcessMap.numBackups:1}")
    private int ruleAgentProcessMapNumBackups;
    @Value("${tip.wlan.ruleAgent.hazelcast.ruleEngineProcessMap.ttlSeconds:120}")
    private int ruleAgentProcessMapTtlSeconds;

    @Value("${tip.wlan.ruleAgent.hazelcast.unassignedRuleAgentQueuesMap:unassigned-re-queues-map}")
    private String unassignedRuleAgentQueuesMapName;
    @Value("${tip.wlan.ruleAgent.hazelcast.unassignedRuleAgentQueuesMap.numBackups:1}")
    private int unassignedRuleAgentQueuesMapNumBackups;

    @Value("${tip.wlan.ruleAgent.hazelcast.ruleAgentQueueAssignmentsMap:rule-agent-q-assignments-map}")
    private String ruleAgentQueueAssignmentsMapName;
    @Value("${tip.wlan.ruleAgent.hazelcast.ruleAgentQueueAssignmentsMap.numBackups:1}")
    private int ruleAgentQueueAssignmentsMapNumBackups;

    @Value("${tip.wlan.ruleAgent.hazelcast.ruleEngineProcessConsumedCapacityMap:re-process-consumed-capacity-map}")
    private String ruleAgentProcessConsumedCapacityMapName;
    @Value("${tip.wlan.ruleAgent.hazelcast.ruleEngineProcessConsumedCapacityMap.numBackups:1}")
    private int ruleAgentProcessConsumedCapacityMapNumBackups;
    @Value("${tip.wlan.ruleAgent.hazelcast.ruleEngineProcessConsumedCapacityMap.ttlSeconds:120}")
    private int ruleAgentProcessConsumedCapacityMapTtlSeconds;

    @Value("${tip.wlan.ruleAgent.hazelcast.ruleEngineLevelAssignmentMap:re-level-assignment-map}")
    private String ruleEngineLevelAssignmentMapName;
    @Value("${tip.wlan.ruleAgent.hazelcast.ruleEngineLevelAssignmentMap.numBackups:1}")
    private int ruleEngineLevelAssignmentMapNumBackups;

    @Value("${tip.wlan.ruleAgent.hazelcast.equipmentFullPathMap:equipment-full-path-map}")
    private String equipmentFullPathMapName;
    @Value("${tip.wlan.ruleAgent.hazelcast.equipmentFullPathMap.numBackups:1}")
    private int equipmentFullPathMapNumBackups;

    @Value("${tip.wlan.ruleAgent.hazelcast.locationFullPathMap:location-full-path-map}")
    private String locationFullPathMapName;
    @Value("${tip.wlan.ruleAgent.hazelcast.locationFullPathMap.numBackups:1}")
    private int locationFullPathMapNumBackups;

    @Value("${tip.wlan.ruleAgent.hazelcast.workDistributionMonitorSemaphore:re-work-distribution-semaphore}")
    private String ruleEngineWorkDistributionSemaphoreName;
    @Value("${tip.wlan.ruleAgent.hazelcast.workDistributionMonitorSemaphore.numBackups:1}")
    private int ruleEngineWorkDistributionSemaphoreNumBackups;

    @Value("${tip.wlan.ruleAgent.hazelcast.woorkDistributionLastmodTimestamp:re-work-distribution-lastmod}")
    private String ruleEngineWorkDistributionLastmodTimestampName;

    @Value("${tip.wlan.ruleAgent.hazelcast.woorkDistributionLastmodTimestamp:re-unassigned-queue-monitor-lastmod}")
    private String ruleEngineUnasignedQueueLastmodTimestampName;

    @Value("${tip.wlan.ruleAgent.hazelcast.ruleAgentInitialReservedCapacityMap:agent-queue-initial-reserved-capacity-map}")
    private String ruleAgentInitialReservedCapacityMapName;


    /**
     * These maps hold creationTimestamp for files in HDS
     */
    @Value("${tip.wlan.hazelcast.hdsCreationTimestampFileMapPrefix:hdsCreationTs-}")
    private String hdsCreationTimestampFileMapPrefix;

    @Value("${tip.wlan.hazelcast.systemEventsHdsCreationTimestamps.numBackups:1}")
    private int systemEventsHdsCreationTimestampsNumBackups;
    @Value("${tip.wlan.hazelcast.rawEquipmentEventsHdsCreationTimestamps.numBackups:1}")
    private int rawEquipmentEventsHdsCreationTimestampsNumBackups;
    @Value("${tip.wlan.hazelcast.serviceMetricsHdsCreationTimestamps.numBackups:1}")
    private int serviceMetricsHdsCreationTimestampsNumBackups;

    @Value("${tip.wlan.hazelcast.systemEventsHdsCreationTimestamps.ttlSeconds:3600}")
    private int systemEventsHdsCreationTimestampsTtlSeconds;
    @Value("${tip.wlan.hazelcast.rawEquipmentEventsHdsCreationTimestamps.ttlSeconds:3600}")
    private int rawEquipmentEventsHdsCreationTimestampsTtlSeconds;
    @Value("${tip.wlan.hazelcast.serviceMetricsHdsCreationTimestamps.ttlSeconds:3600}")
    private int serviceMetricsHdsCreationTimestampsTtlSeconds;

    /**
     * This map holds directory listings for files in HDS
     */
    @Value("${tip.wlan.hazelcast.hdsDirectoryListingsMapName:hdsDirList-map}")
    private String hdsDirectoryListingsMapName;

    @Value("${tip.wlan.hazelcast.hdsDirectoryListings.numBackups:1}")
    private int hdsDirectoryListingsNumBackups;

    @Value("${tip.wlan.hazelcast.hdsDirectoryListings.ttlSeconds:9000}")
    private int hdsDirectoryListingsTtlSeconds;

    /**
     * This map holds customer-equipmentId pairs taken from directory listings in HDS
     */
    @Value("${tip.wlan.hazelcast.hdsDirectoryCustomerEquipmentMapName:hdsDirCustEq-map}")
    private String hdsDirectoryCustomerEquipmentMapName;

    @Value("${tip.wlan.hazelcast.hdsDirectoryCustomerEquipment.numBackups:1}")
    private int hdsDirectoryCustomerEquipmentNumBackups;

    @Value("${tip.wlan.hazelcast.hdsDirectoryCustomerEquipment.ttlSeconds:9000}")
    private int hdsDirectoryCustomerEquipmentTtlSeconds;

    /**
     * These queues hold command listings for CNAs
     */
    @Value("${tip.wlan.hazelcast.commands.queuePrefix:commands-q-}")
    private String commandListingsQueuePrefix;

    @Value("${tip.wlan.hazelcast.commands.numBackups:1}")
    private int commandListingsQueueNumBackups;

    @Value("${tip.wlan.hazelcast.commands.maxSize:5000}")
    private int commandListingsQueueMaxSize;

    /**
     * These maps hold record indexes with TTL = 2 hrs
     */
    @Value("${tip.wlan.hazelcast.recordIndexMapPrefix:recIdx-}")
    private String recordIndexMapPrefix;

    @Value("${tip.wlan.hazelcast.systemEventsRecordIndex.numBackups:1}")
    private int systemEventsRecordIndexNumBackups;
    @Value("${tip.wlan.hazelcast.rawEquipmentEventsRecordIndex.numBackups:1}")
    private int rawEquipmentEventsRecordIndexNumBackups;
    @Value("${tip.wlan.hazelcast.serviceMetricsRecordIndex.numBackups:1}")
    private int serviceMetricsRecordIndexNumBackups;

    @Value("${tip.wlan.hazelcast.systemEventsRecordIndex.ttlSeconds:7200}")
    private int systemEventsRecordIndexTtlSeconds;
    @Value("${tip.wlan.hazelcast.rawEquipmentEventsRecordIndex.ttlSeconds:7200}")
    private int rawEquipmentEventsRecordIndexTtlSeconds;
    @Value("${tip.wlan.hazelcast.serviceMetricsRecordIndex.ttlSeconds:7200}")
    private int serviceMetricsRecordIndexTtlSeconds;


    /**
     * This map holds names of hourly directories for which a re-build of the hourly index was requested,
     * with TTL = 2 hrs
     */
    @Value("${tip.wlan.hazelcast.buildInProgressHourlyDirectoryNamesMapPrefix:bipHrDirs-}")
    private String buildInProgressHourlyDirectoryNamesMapPrefix;
    @Value("${tip.wlan.hazelcast.buildInProgressHourlyDirectoryNames.numBackups:1}")
    private int buildInProgressHourlyDirectoryNamesNumBackups;

    @Value("${tip.wlan.hazelcast.buildInProgressHourlyDirectoryNames.ttlSeconds:7200}")
    private int buildInProgressHourlyDirectoryNamesTtlSeconds;

    /**
     * This queue holds names of hourly directories for which a re-build of the hourly index was requested.
     * K2HdsConnector reads from this queue and performs rebuilds of hourly indexes.
     */
    @Value("${tip.wlan.hazelcast.rebuildIdxHourlyDirectoryNamesQueue:rebuildHrIdxQueue-}")
    private String rebuildIdxHourlyDirectoryNamesQueue;

    @Value("${tip.wlan.hazelcast.rebuildIdxHourlyDirectoryNamesQueue.numBackups:1}")
    private int rebuildIdxHourlyDirectoryNamesQueueNumBackups;

    /**
     * Iterate through all instance variables, and for all that are String or int and that are null or 0 -
     * find the @Value annotation and use it with SystemAndEnvPropertyResolver.
     *
     * @return HazelcastObjectsConfiguration constructed from System Properties or Environment Variables
     */
    public static HazelcastObjectsConfiguration createOutsideOfSpringApp() {
        HazelcastObjectsConfiguration ret = new HazelcastObjectsConfiguration();
        return SystemAndEnvPropertyResolver.initOutsideOfSpringApp(ret);
    }

    public static void main(String[] args) {
        createOutsideOfSpringApp();
    }

    public int getSystemEventsNumBackups() {
        return systemEventsNumBackups;
    }

    public int getSystemEventsTtlSeconds() {
        return systemEventsTtlSeconds;
    }

    /**
     * These maps hold the last 10 minutes of system events - to be able to include in queries data that has not yet been written to HDS
     */
    public String getSystemEventsMapPrefix() {
        return systemEventsMapPrefix;
    }

    public int getRawEquipmentEventsNumBackups() {
        return rawEquipmentEventsNumBackups;
    }

    public int getRawEquipmentEventsTtlSeconds() {
        return rawEquipmentEventsTtlSeconds;
    }

    /**
     * These maps hold the last 10 minutes of equipment events - to be able to include in queries data that has not yet been written to HDS
     */
    public String getRawEquipmentEventsMapPrefix() {
        return rawEquipmentEventsMapPrefix;
    }

    public int getServiceMetricsNumBackups() {
        return serviceMetricsNumBackups;
    }

    public int getServiceMetricsTtlSeconds() {
        return serviceMetricsTtlSeconds;
    }

    /**
     * These maps hold the last 10 minutes of service metrics - to be able to include in queries data that has not yet been written to HDS
     */
    public String getServiceMetricsMapPrefix() {
        return serviceMetricsMapPrefix;
    }

    /**
     * These maps hold creationTimestamp for files in HDS
     */
    public String getHdsCreationTimestampFileMapPrefix() {
        return hdsCreationTimestampFileMapPrefix;
    }

    public int getSystemEventsHdsCreationTimestampsNumBackups() {
        return systemEventsHdsCreationTimestampsNumBackups;
    }

    public int getRawEquipmentEventsHdsCreationTimestampsNumBackups() {
        return rawEquipmentEventsHdsCreationTimestampsNumBackups;
    }

    public int getServiceMetricsHdsCreationTimestampsNumBackups() {
        return serviceMetricsHdsCreationTimestampsNumBackups;
    }

    public int getSystemEventsHdsCreationTimestampsTtlSeconds() {
        return systemEventsHdsCreationTimestampsTtlSeconds;
    }

    public int getRawEquipmentEventsHdsCreationTimestampsTtlSeconds() {
        return rawEquipmentEventsHdsCreationTimestampsTtlSeconds;
    }

    public int getServiceMetricsHdsCreationTimestampsTtlSeconds() {
        return serviceMetricsHdsCreationTimestampsTtlSeconds;
    }

    /**
     * These maps hold record indexes with TTL = 2 hrs
     */
    public String getRecordIndexMapPrefix() {
        return recordIndexMapPrefix;
    }

    public int getSystemEventsRecordIndexNumBackups() {
        return systemEventsRecordIndexNumBackups;
    }

    public int getRawEquipmentEventsRecordIndexNumBackups() {
        return rawEquipmentEventsRecordIndexNumBackups;
    }

    public int getServiceMetricsRecordIndexNumBackups() {
        return serviceMetricsRecordIndexNumBackups;
    }

    public int getSystemEventsRecordIndexTtlSeconds() {
        return systemEventsRecordIndexTtlSeconds;
    }

    public int getRawEquipmentEventsRecordIndexTtlSeconds() {
        return rawEquipmentEventsRecordIndexTtlSeconds;
    }

    public int getServiceMetricsRecordIndexTtlSeconds() {
        return serviceMetricsRecordIndexTtlSeconds;
    }

    /**
     * This map holds names of hourly directories for which a re-build of the hourly index was requested,
     * with TTL = 2 hrs
     */
    public String getBuildInProgressHourlyDirectoryNamesMapPrefix() {
        return buildInProgressHourlyDirectoryNamesMapPrefix;
    }

    public int getBuildInProgressHourlyDirectoryNamesNumBackups() {
        return buildInProgressHourlyDirectoryNamesNumBackups;
    }

    public int getBuildInProgressHourlyDirectoryNamesTtlSeconds() {
        return buildInProgressHourlyDirectoryNamesTtlSeconds;
    }

    /**
     * This queue holds names of hourly directories for which a re-build of the hourly index was requested.
     * K2HdsConnector reads from this queue and performs rebuilds of hourly indexes.
     */
    public String getRebuildIdxHourlyDirectoryNamesQueue() {
        return rebuildIdxHourlyDirectoryNamesQueue;
    }

    public int getRebuildIdxHourlyDirectoryNamesQueueNumBackups() {
        return rebuildIdxHourlyDirectoryNamesQueueNumBackups;
    }

    /**
     * Prefix for all rule agent queues. Those queues are of limited capacity.
     * If a caller cannot place a new message on the queue, the queue has to be
     * cleared and a system event created.
     */
    public String getRuleAgentQueuePrefix() {
        return ruleAgentQueuePrefix;
    }

    /**
     * Name of the queue that all rule engine processes read to get requests for new rule agent queue assignments.
     * It works together with unassignedRuleAgentQueuesMapName to reduce the number of duplicate requests. Entries never expire.
     */
    public String getUnassignedRuleAgentQueuesQName() {
        return unassignedRuleAgentQueuesQName;
    }

    public int getUnassignedRuleAgentQueuesQNumBackups() {
        return unassignedRuleAgentQueuesQNumBackups;
    }

    /**
     * Name of the map that keeps requests for new rule agent queue assignments.
     * It works together with unassignedRuleAgentQueuesQName to reduce the number of duplicate requests.
     * Rule engine processes will remove an entry from this map when they successfully spawn a rule agent for a corresponding queue.
     */
    public String getUnassignedRuleAgentQueuesMapName() {
        return unassignedRuleAgentQueuesMapName;
    }

    public int getUnassignedRuleAgentQueuesMapNumBackups() {
        return unassignedRuleAgentQueuesMapNumBackups;
    }

    /**
     * Name of the map that rule engine processes register with. Entries in this
     * map auto-expire after 2 minutes if not re-registered by the rule engine
     * process.
     */
    public String getRuleAgentProcessMapName() {
        return ruleAgentProcessMapName;
    }

    public int getRuleAgentProcessMapNumBackups() {
        return ruleAgentProcessMapNumBackups;
    }

    public int getRuleAgentProcessMapTtlSeconds() {
        return ruleAgentProcessMapTtlSeconds;
    }

    /**
     * Name of the map that keeps track of which rule engine process is responsible for each rule agent queue. Entries never expire.
     */
    public String getRuleAgentQueueAssignmentsMapName() {
        return ruleAgentQueueAssignmentsMapName;
    }

    public int getRuleAgentQueueAssignmentsMapNumBackups() {
        return ruleAgentQueueAssignmentsMapNumBackups;
    }

    /**
     * Name of the map where rule engine processes publish their consumed capacity in events per second. Entries never expire.
     */
    public String getRuleAgentProcessConsumedCapacityMapName() {
        return ruleAgentProcessConsumedCapacityMapName;
    }

    public int getRuleAgentProcessConsumedCapacityMapNumBackups() {
        return ruleAgentProcessConsumedCapacityMapNumBackups;
    }

    public int getRuleAgentProcessConsumedCapacityMapTtlSeconds() {
        return ruleAgentProcessConsumedCapacityMapTtlSeconds;
    }

    /**
     * Name of the map that stores for every customerId at what level the rule
     * agents are created: customer-city-building-floor. Entries are persisted
     * in RDBMS.
     */
    public String getRuleEngineLevelAssignmentMapName() {
        return ruleEngineLevelAssignmentMapName;
    }

    public int getRuleEngineLevelAssignmentMapNumBackups() {
        return ruleEngineLevelAssignmentMapNumBackups;
    }

    /**
     * Name of the map that stores for every equipmentId its full path in the
     * location hierarchy, i.e. eq_15 -> Cu_10_C_12_B_13_F_14. Entries are
     * persisted in RDBMS.
     */
    public String getEquipmentFullPathMapName() {
        return equipmentFullPathMapName;
    }

    public int getEquipmentFullPathMapNumBackups() {
        return equipmentFullPathMapNumBackups;
    }

    /**
     * Name of the map that stores for every locationId its full path in the
     * location hierarchy, i.e. F_14 -> Cu_10_C_12_B_13. Entries are
     * persisted in RDBMS.
     */
    public String getLocationFullPathMapName() {
        return locationFullPathMapName;
    }

    public int getLocationFullPathMapNumBackups() {
        return locationFullPathMapNumBackups;
    }

    /**
     * Name of the semaphore that coordinates work between RE work distribution monitors
     */
    public String getRuleEngineWorkDistributionSemaphoreName() {
        return ruleEngineWorkDistributionSemaphoreName;
    }

    public int getRuleEngineWorkDistributionSemaphoreNumBackups() {
        return ruleEngineWorkDistributionSemaphoreNumBackups;
    }

    /**
     * Name of the atomic long that stores the timestamp of the last scan completed by the RE work distribution monitor
     */
    public String getRuleEngineWorkDistributionLastmodTimestampName() {
        return ruleEngineWorkDistributionLastmodTimestampName;
    }

    public String getRuleEngineUnasignedQueueLastmodTimestampName() {
        return ruleEngineUnasignedQueueLastmodTimestampName;
    }

    /**
     * Max number of events a rule agent queue can hold until it starts blocking
     */
    public int getRuleAgentQueueMaxSize() {
        return ruleAgentQueueMaxSize;
    }

    public int getRuleAgentQueueNumBackups() {
        return ruleAgentQueueNumBackups;
    }

    public String getRuleAgentInitialReservedCapacityMapName() {
        return ruleAgentInitialReservedCapacityMapName;
    }

    /**
     * This map holds directory listings for files in HDS
     */
    public String getHdsDirectoryListingsMapName() {
        return hdsDirectoryListingsMapName;
    }

    public int getHdsDirectoryListingsNumBackups() {
        return hdsDirectoryListingsNumBackups;
    }

    public int getHdsDirectoryListingsTtlSeconds() {
        return hdsDirectoryListingsTtlSeconds;
    }

    /**
     * This map holds customer-equipment mappings taken from directory listings in HDS
     */
    public String getHdsDirectoryCustomerEquipmentMapName() {
        return hdsDirectoryCustomerEquipmentMapName;
    }

    public int getHdsDirectoryCustomerEquipmentNumBackups() {
        return hdsDirectoryCustomerEquipmentNumBackups;
    }

    public int getHdsDirectoryCustomerEquipmentTtlSeconds() {
        return hdsDirectoryCustomerEquipmentTtlSeconds;
    }

    /**
     * This is the prefix for queues that hold commands to be executed by a CNA
     */
    public String getCommandListingsQueuePrefix() {
        return commandListingsQueuePrefix;
    }

    public int getCommandListingsQueueMaxSize() {
        return commandListingsQueueMaxSize;
    }

    public int getCommandListingsQueueNumBackups() {
        return commandListingsQueueNumBackups;
    }
}

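A brief sketch of how this configuration class might be consumed outside of a Spring context, as its createOutsideOfSpringApp() method suggests. The client setup and the way the configured prefix is combined with "customer-42" to form a map name are hypothetical illustrations, not a convention documented in this commit:

package com.telecominfraproject.wlan.hazelcast.common;

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;

public class HazelcastObjectsConfigurationExample {
    public static void main(String[] args) {
        // resolve all @Value defaults from system properties / environment variables
        HazelcastObjectsConfiguration config = HazelcastObjectsConfiguration.createOutsideOfSpringApp();

        HazelcastInstance client = HazelcastClient.newHazelcastClient();

        // hypothetical: build a per-customer map name from the configured prefix
        String mapName = config.getSystemEventsMapPrefix() + "customer-42";
        IMap<String, Object> systemEvents = client.getMap(mapName);

        System.out.println(mapName + " currently holds " + systemEvents.size() + " entries");
        client.shutdown();
    }
}
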
@@ -0,0 +1,15 @@
package com.telecominfraproject.wlan.hazelcast.common;

import java.util.Map.Entry;

import com.hazelcast.query.Predicate;

public class SamplePredicate implements Predicate<String, String> {
    private static final long serialVersionUID = 1868230836762136031L;

    @Override
    public boolean apply(Entry<String, String> mapEntry) {
        return (Integer.parseInt(mapEntry.getValue()) % 2 == 0);
    }

}

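As an illustration only (the map name and values below are made up), this predicate could be used to filter the entries of an IMap<String, String> on the cluster side via the Hazelcast 3.x IMap.values(Predicate) call:

package com.telecominfraproject.wlan.hazelcast.common;

import java.util.Collection;

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;

public class SamplePredicateExample {
    public static void main(String[] args) {
        HazelcastInstance client = HazelcastClient.newHazelcastClient();

        IMap<String, String> numbers = client.getMap("sample-numbers-map");
        numbers.put("a", "1");
        numbers.put("b", "2");
        numbers.put("c", "4");

        // only entries whose value parses to an even number match: "2" and "4"
        Collection<String> evenValues = numbers.values(new SamplePredicate());
        System.out.println(evenValues);

        client.shutdown();
    }
}
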