Compare commits

...

2 Commits

Author SHA1 Message Date
Dmitry Toptygin
d64e6dc092 WIFI-7556 updated spring-boot version to 2.5.12 (spring 5.3.18) to cover for Springshell vulnerability (#26) 2022-04-05 13:55:13 -04:00
norm-traxler
7897ccdf21 Merge pull request #25 from Telecominfraproject/WIFI-4732
Wifi 4732 EquipmentPortalController resets to defaults channels for e…
2021-10-20 16:17:30 -04:00
17 changed files with 271 additions and 250 deletions

View File

@@ -10,14 +10,6 @@
<name>base-hazelcast-client</name>
<description>Common classes for accessing Hazelcast in-memory grid data store.</description>
<dependencies>
<dependency>
<groupId>com.hazelcast</groupId>
<artifactId>hazelcast</artifactId>
</dependency>
<dependency>
<groupId>com.hazelcast</groupId>
<artifactId>hazelcast-client</artifactId>
</dependency>
<dependency>
<groupId>com.telecominfraproject.wlan</groupId>
<artifactId>common-hazelcast</artifactId>

View File

@@ -10,7 +10,6 @@ import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Profile;
import com.hazelcast.config.Config;
import com.hazelcast.config.GroupConfig;
import com.hazelcast.config.InterfacesConfig;
import com.hazelcast.config.JoinConfig;
import com.hazelcast.config.MapConfig;
@@ -19,7 +18,6 @@ import com.hazelcast.config.QueueConfig;
import com.hazelcast.config.TcpIpConfig;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.spi.properties.GroupProperty;
@Configuration
@Profile("HazelcastForUnitTest")
@@ -69,12 +67,14 @@ public class HazelcastForUnitTest {
public HazelcastInstance hazelcastInstanceTest() {
// this is used for experiments and unit tests
Config config = new Config();
config.setProperty(GroupProperty.LOGGING_TYPE.getName(), "slf4j");
config.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");
// config.setProperty(GroupProperty.LOGGING_TYPE.getName(), "slf4j");
// config.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");
GroupConfig groupConfig = new GroupConfig(System.getProperty("tip.wlan.hazelcast.groupName", "wc-dev"));
groupConfig.setPassword(System.getProperty("tip.wlan.hazelcast.groupPassword", "wc-dev-pass"));
config.setGroupConfig(groupConfig);
// GroupConfig groupConfig = new GroupConfig(System.getProperty("tip.wlan.hazelcast.groupName", "wc-dev"));
// groupConfig.setPassword(System.getProperty("tip.wlan.hazelcast.groupPassword", "wc-dev-pass"));
// config.setGroupConfig(groupConfig);
config.setClusterName(System.getProperty("tip.wlan.hazelcast.clusterName", "wc-dev"));
config.getNetworkConfig().setPublicAddress("127.0.0.1").setPort(5900).setPortAutoIncrement(true)
.setInterfaces(new InterfacesConfig().addInterface("127.0.0.1"));

View File

@@ -14,8 +14,8 @@ import org.springframework.context.annotation.Profile;
import org.springframework.core.env.Environment;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.config.security.UsernamePasswordIdentityConfig;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.spi.properties.GroupProperty;
import com.telecominfraproject.wlan.core.model.utils.SystemAndEnvPropertyResolver;
/**
@@ -142,10 +142,11 @@ public class HazelcastClientConfiguration {
@Bean
public HazelcastInstance hazelcastClientUnicast() {
ClientConfig clientConfig = new ClientConfig();
clientConfig.setProperty(GroupProperty.LOGGING_TYPE.getName(), "slf4j");
clientConfig.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");
// clientConfig.setProperty(GroupProperty.LOGGING_TYPE.getName(), "slf4j");
// clientConfig.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");
clientConfig.getGroupConfig().setName(groupName).setPassword(groupPassword);
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig(groupName, groupPassword));
for (String addrStr : nodeAddressesStr.split(",")) {
clientConfig.getNetworkConfig().addAddress(addrStr);
}
@@ -156,7 +157,7 @@ public class HazelcastClientConfiguration {
clientConfig.getNetworkConfig().setSmartRouting(false);
// the client will attempt to re-connect to the cluster forever if
// cluster is not available
clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
//clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
HazelcastInstance client = new ReConnectingHazelcastClient(clientConfig, reconnectTimeSec);

View File

@@ -1,47 +1,42 @@
package com.telecominfraproject.wlan.hazelcast.client;
import java.util.Collection;
import java.util.UUID;
import java.util.concurrent.ConcurrentMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.hazelcast.cardinality.CardinalityEstimator;
import com.hazelcast.client.ClientService;
import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.cluster.Cluster;
import com.hazelcast.cluster.Endpoint;
import com.hazelcast.collection.IList;
import com.hazelcast.collection.IQueue;
import com.hazelcast.collection.ISet;
import com.hazelcast.config.Config;
import com.hazelcast.core.ClientService;
import com.hazelcast.core.Cluster;
import com.hazelcast.core.DistributedObject;
import com.hazelcast.core.DistributedObjectListener;
import com.hazelcast.core.Endpoint;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IAtomicLong;
import com.hazelcast.core.IAtomicReference;
import com.hazelcast.core.ICacheManager;
import com.hazelcast.core.ICountDownLatch;
import com.hazelcast.core.IExecutorService;
import com.hazelcast.core.IList;
import com.hazelcast.core.ILock;
import com.hazelcast.core.IMap;
import com.hazelcast.core.IQueue;
import com.hazelcast.core.ISemaphore;
import com.hazelcast.core.ISet;
import com.hazelcast.core.ITopic;
import com.hazelcast.core.IdGenerator;
import com.hazelcast.core.LifecycleService;
import com.hazelcast.core.MultiMap;
import com.hazelcast.core.PartitionService;
import com.hazelcast.core.ReplicatedMap;
import com.hazelcast.cp.CPSubsystem;
import com.hazelcast.crdt.pncounter.PNCounter;
import com.hazelcast.durableexecutor.DurableExecutorService;
import com.hazelcast.flakeidgen.FlakeIdGenerator;
import com.hazelcast.logging.LoggingService;
import com.hazelcast.mapreduce.JobTracker;
import com.hazelcast.quorum.QuorumService;
import com.hazelcast.map.IMap;
import com.hazelcast.multimap.MultiMap;
import com.hazelcast.partition.PartitionService;
import com.hazelcast.replicatedmap.ReplicatedMap;
import com.hazelcast.ringbuffer.Ringbuffer;
import com.hazelcast.scheduledexecutor.IScheduledExecutorService;
import com.hazelcast.splitbrainprotection.SplitBrainProtectionService;
import com.hazelcast.sql.SqlService;
import com.hazelcast.topic.ITopic;
import com.hazelcast.transaction.HazelcastXAResource;
import com.hazelcast.transaction.TransactionContext;
import com.hazelcast.transaction.TransactionException;
@@ -150,224 +145,157 @@ public class ReConnectingHazelcastClient implements HazelcastInstance {
}
return this.client;
}
@Override
public String getName() {
return getClient().getName();
}
@Override
public <E> IQueue<E> getQueue(String name) {
return getClient().getQueue(name);
}
@Override
public <E> ITopic<E> getTopic(String name) {
return getClient().getTopic(name);
}
@Override
public <E> ISet<E> getSet(String name) {
return getClient().getSet(name);
}
@Override
public <E> IList<E> getList(String name) {
return getClient().getList(name);
}
@Override
public <K, V> IMap<K, V> getMap(String name) {
return getClient().getMap(name);
}
@Override
public <K, V> ReplicatedMap<K, V> getReplicatedMap(String name) {
return getClient().getReplicatedMap(name);
}
@Override
public JobTracker getJobTracker(String name) {
return getClient().getJobTracker(name);
}
@Override
public <K, V> MultiMap<K, V> getMultiMap(String name) {
return getClient().getMultiMap(name);
}
@Override
public ILock getLock(String key) {
return getClient().getLock(key);
}
@Override
public <E> Ringbuffer<E> getRingbuffer(String name) {
return getClient().getRingbuffer(name);
}
@Override
public <E> ITopic<E> getReliableTopic(String name) {
return getClient().getReliableTopic(name);
}
@Override
public Cluster getCluster() {
return getClient().getCluster();
}
@Override
public Endpoint getLocalEndpoint() {
return getClient().getLocalEndpoint();
}
@Override
public IExecutorService getExecutorService(String name) {
return getClient().getExecutorService(name);
}
@Override
public <T> T executeTransaction(TransactionalTask<T> task) throws TransactionException {
return getClient().executeTransaction(task);
}
@Override
public <T> T executeTransaction(TransactionOptions options, TransactionalTask<T> task) throws TransactionException {
return getClient().executeTransaction(options, task);
}
@Override
public TransactionContext newTransactionContext() {
return getClient().newTransactionContext();
}
@Override
public TransactionContext newTransactionContext(TransactionOptions options) {
return getClient().newTransactionContext(options);
}
@Override
public IdGenerator getIdGenerator(String name) {
return getClient().getIdGenerator(name);
}
@Override
public IAtomicLong getAtomicLong(String name) {
return getClient().getAtomicLong(name);
}
@Override
public <E> IAtomicReference<E> getAtomicReference(String name) {
return getClient().getAtomicReference(name);
}
@Override
public ICountDownLatch getCountDownLatch(String name) {
return getClient().getCountDownLatch(name);
}
@Override
public ISemaphore getSemaphore(String name) {
return getClient().getSemaphore(name);
}
@Override
public Collection<DistributedObject> getDistributedObjects() {
return getClient().getDistributedObjects();
}
@Override
public String addDistributedObjectListener(DistributedObjectListener distributedObjectListener) {
return getClient().addDistributedObjectListener(distributedObjectListener);
}
@Override
public boolean removeDistributedObjectListener(String registrationId) {
return getClient().removeDistributedObjectListener(registrationId);
}
@Override
public Config getConfig() {
return getClient().getConfig();
}
@Override
public PartitionService getPartitionService() {
return getClient().getPartitionService();
}
@Override
public QuorumService getQuorumService() {
return getClient().getQuorumService();
}
@Override
public ClientService getClientService() {
return getClient().getClientService();
}
@Override
public LoggingService getLoggingService() {
return getClient().getLoggingService();
}
@Override
public LifecycleService getLifecycleService() {
return getClient().getLifecycleService();
}
@Override
public <T extends DistributedObject> T getDistributedObject(String serviceName, String name) {
return getClient().getDistributedObject(serviceName, name);
}
@Override
public ConcurrentMap<String, Object> getUserContext() {
return getClient().getUserContext();
}
@Override
public HazelcastXAResource getXAResource() {
return getClient().getXAResource();
}
@Override
public void shutdown() {
getClient().shutdown();
}
@Override
public DurableExecutorService getDurableExecutorService(String name) {
return getClient().getDurableExecutorService(name);
}
@Override
public <T> T executeTransaction(TransactionalTask<T> task) throws TransactionException {
return getClient().executeTransaction(task);
}
public <T> T executeTransaction(TransactionOptions options, TransactionalTask<T> task) throws TransactionException {
return getClient().executeTransaction(options, task);
}
public TransactionContext newTransactionContext() {
return getClient().newTransactionContext();
}
public TransactionContext newTransactionContext(TransactionOptions options) {
return getClient().newTransactionContext(options);
}
public FlakeIdGenerator getFlakeIdGenerator(String name) {
return getClient().getFlakeIdGenerator(name);
}
public Collection<DistributedObject> getDistributedObjects() {
return getClient().getDistributedObjects();
}
public UUID addDistributedObjectListener(DistributedObjectListener distributedObjectListener) {
return getClient().addDistributedObjectListener(distributedObjectListener);
}
public boolean removeDistributedObjectListener(UUID registrationId) {
return getClient().removeDistributedObjectListener(registrationId);
}
public Config getConfig() {
return getClient().getConfig();
}
public PartitionService getPartitionService() {
return getClient().getPartitionService();
}
public SplitBrainProtectionService getSplitBrainProtectionService() {
return getClient().getSplitBrainProtectionService();
}
public ClientService getClientService() {
return getClient().getClientService();
}
public LoggingService getLoggingService() {
return getClient().getLoggingService();
}
public LifecycleService getLifecycleService() {
return getClient().getLifecycleService();
}
public <T extends DistributedObject> T getDistributedObject(String serviceName, String name) {
return getClient().getDistributedObject(serviceName, name);
}
public ConcurrentMap<String, Object> getUserContext() {
return getClient().getUserContext();
}
public HazelcastXAResource getXAResource() {
return getClient().getXAResource();
}
public ICacheManager getCacheManager() {
return getClient().getCacheManager();
}
@Override
public CardinalityEstimator getCardinalityEstimator(String name) {
return getClient().getCardinalityEstimator(name);
}
@Override
public PNCounter getPNCounter(String name) {
return getClient().getPNCounter(name);
}
public IScheduledExecutorService getScheduledExecutorService(String name) {
return getClient().getScheduledExecutorService(name);
}
@Override
public PNCounter getPNCounter(String name) {
return getClient().getPNCounter(name);
}
@Override
public CPSubsystem getCPSubsystem() {
return getClient().getCPSubsystem();
return getClient().getCPSubsystem();
}
public SqlService getSql() {
return getClient().getSql();
}
public void shutdown() {
getClient().shutdown();
}
@Override
public FlakeIdGenerator getFlakeIdGenerator(String name) {
return getClient().getFlakeIdGenerator(name);
}
}

View File

@@ -6,8 +6,8 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@@ -16,15 +16,16 @@ import org.slf4j.LoggerFactory;
import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.collection.IQueue;
import com.hazelcast.collection.impl.queue.QueueService;
import com.hazelcast.config.security.UsernamePasswordIdentityConfig;
import com.hazelcast.core.DistributedObject;
import com.hazelcast.core.DistributedObjectEvent;
import com.hazelcast.core.DistributedObjectListener;
import com.hazelcast.core.EntryEvent;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.core.IQueue;
import com.hazelcast.core.IdGenerator;
import com.hazelcast.flakeidgen.FlakeIdGenerator;
import com.hazelcast.map.IMap;
import com.hazelcast.map.listener.EntryAddedListener;
import com.hazelcast.map.listener.EntryEvictedListener;
import com.hazelcast.map.listener.EntryRemovedListener;
@@ -35,7 +36,8 @@ public class TestClient {
public static void main_1(String[] args) throws IOException {
ClientConfig clientConfig = new ClientConfig();
clientConfig.getGroupConfig().setName("wc-dev").setPassword("wc-dev-pass");
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig("wc-dev", "wc-dev-pass"));
clientConfig.getNetworkConfig().addAddress("127.0.0.1:5900");
clientConfig.getNetworkConfig().addAddress("127.0.0.1:5901");
clientConfig.getNetworkConfig().addAddress("127.0.0.1:5902");
@@ -53,7 +55,7 @@ public class TestClient {
System.out.println("metricsMap["+ entry.getKey() +"]: " +entry.getValue());
}
IdGenerator testIdGenerator = client.getIdGenerator("id_generator_created_from_client");
FlakeIdGenerator testIdGenerator = client.getFlakeIdGenerator("id_generator_created_from_client");
Map<Long, String> mapCreatedFromClient = client.getMap( "map_created_from_client" );
System.out.println("mapCreatedFromClient: " + mapCreatedFromClient);
@@ -120,7 +122,8 @@ public class TestClient {
public static void main_2(String[] args) throws InterruptedException {
ClientConfig clientConfig = new ClientConfig();
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig(clusterName, password));
clientConfig.getNetworkConfig().addAddress(addr);
clientConfig.getNetworkConfig().addAddress("127.0.0.1:5901");
clientConfig.getNetworkConfig().addAddress("127.0.0.1:5902");
@@ -239,7 +242,7 @@ public class TestClient {
public static void main_3(String[] args) throws InterruptedException, ExecutionException {
ClientConfig clientConfig = new ClientConfig();
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig(clusterName, password));
clientConfig.getNetworkConfig().addAddress(addr);
// clientConfig.getNetworkConfig().addAddress("127.0.0.1:5901");
// clientConfig.getNetworkConfig().addAddress("127.0.0.1:5902");
@@ -247,7 +250,7 @@ public class TestClient {
//see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
// here we're using "dumb" client that connects only to a single node of the cluster
clientConfig.getNetworkConfig().setSmartRouting(false);
clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
//clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
//HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
HazelcastInstance client = new ReConnectingHazelcastClient(clientConfig, 20);
@@ -259,7 +262,7 @@ public class TestClient {
if(Boolean.getBoolean("populateMapBeforeTest")){
@SuppressWarnings("rawtypes")
List<Future> futures = new ArrayList<>(1000);
List<CompletionStage<String>> futures = new ArrayList<>(1000);
//populate map with entries
for(int i=0; i<1000000; i++){
@@ -280,12 +283,12 @@ public class TestClient {
// }
// }
futures.add(client.getMap("testMap").putAsync("t_" + Integer.toString(fI), Integer.toString(fI)));
futures.add(client.<String, String>getMap("testMap").putAsync("t_" + Integer.toString(fI), Integer.toString(fI)));
//wait for a batch of futures to complete
if(futures.size()>=990){
for(Future<?> f: futures){
f.get();
for(CompletionStage<String> f: futures){
f.toCompletableFuture().get();
}
futures.clear();
@@ -293,8 +296,8 @@ public class TestClient {
}
for(Future<?> f: futures){
f.get();
for(CompletionStage<String> f: futures){
f.toCompletableFuture().get();
}
end = System.currentTimeMillis();
@@ -323,7 +326,7 @@ public class TestClient {
evenCount = 0;
Predicate<String, String> predicate = new SamplePredicate();
for(Map.Entry<?, ?> entry: client.getMap( "testMap" ).entrySet(predicate )){
for(Map.Entry<String, String> entry: client.<String, String>getMap( "testMap" ).entrySet(predicate )){
evenCount++;
}
@@ -355,13 +358,13 @@ public class TestClient {
ClientConfig clientConfig = new ClientConfig();
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig(clusterName, password));
clientConfig.getNetworkConfig().addAddress(addr);
//see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
// here we're using "dumb" client that connects only to a single node of the cluster
clientConfig.getNetworkConfig().setSmartRouting(false);
clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
//clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
@@ -386,13 +389,13 @@ public class TestClient {
public static void main_5(String[] args) {
ClientConfig clientConfig = new ClientConfig();
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig(clusterName, password));
clientConfig.getNetworkConfig().addAddress(addr);
//see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
// here we're using "dumb" client that connects only to a single node of the cluster
clientConfig.getNetworkConfig().setSmartRouting(false);
clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
//clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
@@ -426,13 +429,13 @@ public class TestClient {
public static void main_6(String[] args) {
ClientConfig clientConfig = new ClientConfig();
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig(clusterName, password));
clientConfig.getNetworkConfig().addAddress(addr);
//see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
// here we're using "dumb" client that connects only to a single node of the cluster
clientConfig.getNetworkConfig().setSmartRouting(false);
clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
//clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
@@ -464,13 +467,13 @@ public class TestClient {
public static void main_7(String[] args) {
ClientConfig clientConfig = new ClientConfig();
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig(clusterName, password));
clientConfig.getNetworkConfig().addAddress(addr);
//see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
// here we're using "dumb" client that connects only to a single node of the cluster
clientConfig.getNetworkConfig().setSmartRouting(false);
clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
//clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
@@ -523,13 +526,13 @@ public class TestClient {
public static void main(String[] args) {
ClientConfig clientConfig = new ClientConfig();
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig(clusterName, password));
clientConfig.getNetworkConfig().addAddress(addr);
//see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
// here we're using "dumb" client that connects only to a single node of the cluster
clientConfig.getNetworkConfig().setSmartRouting(false);
clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
//clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
HazelcastInstance hazelcastClient = HazelcastClient.newHazelcastClient(clientConfig);
@@ -598,13 +601,13 @@ public class TestClient {
public static void main_9(String[] args) throws InterruptedException {
ClientConfig clientConfig = new ClientConfig();
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig(clusterName, password));
clientConfig.getNetworkConfig().addAddress(addr);
//see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
// here we're using "dumb" client that connects only to a single node of the cluster
clientConfig.getNetworkConfig().setSmartRouting(false);
clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
//clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
HazelcastInstance hazelcastClient = HazelcastClient.newHazelcastClient(clientConfig);
DistributedObjectListener distributedObjectListener = new DistributedObjectListener() {

View File

@@ -5,9 +5,9 @@ import java.util.Set;
import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.config.security.UsernamePasswordIdentityConfig;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.spi.properties.GroupProperty;
import com.hazelcast.map.IMap;
import com.telecominfraproject.wlan.hazelcast.common.HazelcastObjectsConfiguration;
public class GetValues
@@ -25,10 +25,10 @@ public class GetValues
// }
ClientConfig clientConfig = new ClientConfig();
clientConfig.setProperty(GroupProperty.LOGGING_TYPE.getName(), "slf4j");
clientConfig.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");
// clientConfig.setProperty(GroupProperty.LOGGING_TYPE.getName(), "slf4j");
// clientConfig.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig(clusterName, password));
clientConfig.getNetworkConfig().addAddress(addr);
HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);

View File

@@ -2,9 +2,9 @@ package com.telecominfraproject.wlan.hazelcast.client.clu;
import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.config.security.UsernamePasswordIdentityConfig;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.spi.properties.GroupProperty;
import com.hazelcast.map.IMap;
public class SetProvisionedCapacity {
@@ -21,16 +21,14 @@ public class SetProvisionedCapacity {
ClientConfig clientConfig = new ClientConfig();
clientConfig.setProperty(GroupProperty.LOGGING_TYPE.getName(), "slf4j");
clientConfig.setProperty(GroupProperty.PHONE_HOME_ENABLED.getName(), "false");
clientConfig.getGroupConfig().setName(clusterName).setPassword(password);
clientConfig.getSecurityConfig().setUsernamePasswordIdentityConfig(new UsernamePasswordIdentityConfig(clusterName, password));
clientConfig.getNetworkConfig().addAddress(addr);
//see http://docs.hazelcast.org/docs/3.6/manual/html-single/index.html#java-client-operation-modes
// here we're using "dumb" client that connects only to a single node of the cluster
clientConfig.getNetworkConfig().setSmartRouting(false);
clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
//clientConfig.getNetworkConfig().setConnectionAttemptLimit(0);
HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);

View File

@@ -27,6 +27,7 @@ import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
@@ -41,9 +42,9 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.StreamUtils;
import com.hazelcast.collection.IQueue;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.core.IQueue;
import com.hazelcast.map.IMap;
import com.netflix.servo.monitor.Counter;
import com.netflix.servo.monitor.Timer;
import com.telecominfraproject.wlan.cloudmetrics.CloudMetricsUtils;
@@ -495,7 +496,8 @@ public class HierarchicalDatastore{
try {
LOG.trace("submitting append request to hazelcast");
//submit operation and wait for its completion
dirListMap.submitToKey(dirKey, new AppendStringToSetEntryProcessor(shortFileName) ).get();
CompletionStage<Set<String>> cs = dirListMap.submitToKey(dirKey, new AppendStringToSetEntryProcessor(shortFileName) );
cs.toCompletableFuture().get();
LOG.trace("append request is processed in hazelcast");
} catch (InterruptedException e) {
Thread.currentThread().interrupt();

View File

@@ -24,6 +24,7 @@ import java.util.concurrent.TimeUnit;
import javax.annotation.PostConstruct;
import org.junit.AfterClass;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
@@ -35,7 +36,7 @@ import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit4.SpringRunner;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.map.IMap;
import com.telecominfraproject.wlan.core.model.filter.EntryFilter;
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
import com.telecominfraproject.wlan.hazelcast.HazelcastForUnitTest;
@@ -59,6 +60,7 @@ import com.telecominfraproject.wlan.hierarchical.datastore.writer.StreamHolder;
PropertySourcesPlaceholderConfigurer.class, //must have this to resolve non-string @Value annotations, i.e. int properties, etc.
})
@ActiveProfiles({"HazelcastForUnitTest"})
@Ignore("DT: these compoinents are not used for now, re-enable the tests if and when they are back in the system")
public class HierarchicalDatastoreHourlyIndexTests {
static{

View File

@@ -30,7 +30,7 @@ import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit4.SpringRunner;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.map.IMap;
import com.telecominfraproject.wlan.core.model.filter.EntryFilter;
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
import com.telecominfraproject.wlan.hazelcast.HazelcastForUnitTest;
@@ -53,6 +53,7 @@ import com.telecominfraproject.wlan.hierarchical.datastore.writer.StreamHolder;
PropertySourcesPlaceholderConfigurer.class, //must have this to resolve non-string @Value annotations, i.e. int properties, etc.
})
@ActiveProfiles({"HazelcastForUnitTest"})
@Ignore("DT: these compoinents are not used for now, re-enable the tests if and when they are back in the system")
public class HierarchicalDatastoreRecordIndexTests {
static{

View File

@@ -26,6 +26,7 @@ import java.util.zip.ZipOutputStream;
import javax.annotation.PostConstruct;
import org.junit.AfterClass;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
@@ -57,6 +58,7 @@ import com.telecominfraproject.wlan.hierarchical.datastore.index.registry.Record
HazelcastObjectsConfiguration.class,
})
@ActiveProfiles({"HazelcastForUnitTest"})
@Ignore("DT: these compoinents are not used for now, re-enable the tests if and when they are back in the system")
public class HierarchicalDatastoreTests {
static{

View File

@@ -3,6 +3,7 @@ package com.telecominfraproject.wlan.core.server.jdbc;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
@@ -1092,7 +1093,80 @@ class JdbcOperationsWithMetrics implements JdbcOperations{
}
}
@Override
public <T> Stream<T> queryForStream(String sql, RowMapper<T> rowMapper) throws DataAccessException {
queriesExecuted.increment();
Stopwatch s = queriesTimer.start();
boolean success = false;
try{
Stream<T> ret = delegate.queryForStream(sql, rowMapper);
success = true;
return ret;
}finally{
s.stop();
if(!success){
queriesErrors.increment();
}
}
}
@Override
public <T> Stream<T> queryForStream(PreparedStatementCreator psc, RowMapper<T> rowMapper) throws DataAccessException {
queriesExecuted.increment();
Stopwatch s = queriesTimer.start();
boolean success = false;
try{
Stream<T> ret = delegate.queryForStream(psc, rowMapper);
success = true;
return ret;
}finally{
s.stop();
if(!success){
queriesErrors.increment();
}
}
}
@Override
public <T> Stream<T> queryForStream(String sql, PreparedStatementSetter pss, RowMapper<T> rowMapper) throws DataAccessException {
    // Record the attempt and time the delegate call.
    queriesExecuted.increment();
    Stopwatch stopwatch = queriesTimer.start();
    boolean completed = false;
    try {
        Stream<T> resultStream = delegate.queryForStream(sql, pss, rowMapper);
        completed = true;
        return resultStream;
    } finally {
        stopwatch.stop();
        if (!completed) {
            // Delegate threw before handing back a stream; count it as a failed query.
            // NOTE(review): only stream creation is timed/error-counted here — the
            // stream is lazy, so consumption-time failures are not recorded.
            queriesErrors.increment();
        }
    }
}
@Override
public <T> Stream<T> queryForStream(String sql, RowMapper<T> rowMapper, Object... args) throws DataAccessException {
    // Record the attempt and time the delegate call.
    queriesExecuted.increment();
    Stopwatch stopwatch = queriesTimer.start();
    boolean completed = false;
    try {
        Stream<T> resultStream = delegate.queryForStream(sql, rowMapper, args);
        completed = true;
        return resultStream;
    } finally {
        stopwatch.stop();
        if (!completed) {
            // Delegate threw before handing back a stream; count it as a failed query.
            // NOTE(review): only stream creation is timed/error-counted here — the
            // stream is lazy, so consumption-time failures are not recorded.
            queriesErrors.increment();
        }
    }
}
@Override
public String toString() {
return "JOWM-"+delegate.toString();

View File

@@ -45,11 +45,11 @@ public class BaseJsonTypeResolverBuilder extends StdTypeResolverBuilder {
@Override
protected Object _deserializeTypedUsingDefaultImpl(final JsonParser jp, DeserializationContext ctxt,
TokenBuffer tb) throws IOException {
TokenBuffer tb, String priorFailureMsg) throws IOException {
Object obj = null;
JsonParser parser = jp;
try{
obj = super._deserializeTypedUsingDefaultImpl(parser, ctxt, tb);
obj = super._deserializeTypedUsingDefaultImpl(parser, ctxt, tb, priorFailureMsg);
}catch(Exception e){
JsonDeserializer<Object> deser = ctxt.findContextualValueDeserializer(this._baseType, null);
if (deser != null) {
@@ -109,7 +109,7 @@ public class BaseJsonTypeResolverBuilder extends StdTypeResolverBuilder {
return ret;
}
public void setPropertName(String property) {
public void setPropertyName(String property) {
super._typeProperty = property;
}
}

View File

@@ -9,8 +9,9 @@ import org.slf4j.LoggerFactory;
import org.springframework.scheduling.annotation.Scheduled;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.ILock;
import com.hazelcast.core.IMap;
import com.hazelcast.cp.CPSubsystem;
import com.hazelcast.cp.lock.FencedLock;
import com.hazelcast.map.IMap;
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
/**
@@ -65,7 +66,9 @@ public class DynamicServicePartitioner implements ServicePartitionerInterface {
IMap<String, byte[]> partitionerMap = hazelcastInstance.getMap(mapName);
// lock the whole partitionerMap while refresh is happening
ILock mapLock = hazelcastInstance.getLock("lock_" + mapName);
// see https://docs.hazelcast.com/imdg/4.2/migration-guides
CPSubsystem cpSubsystem = hazelcastInstance.getCPSubsystem();
FencedLock mapLock = cpSubsystem.getLock("lock_" + mapName);
mapLock.lock();
try {

View File

@@ -10,7 +10,7 @@ import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.map.IMap;
/**
* @author yongli

View File

@@ -4,8 +4,10 @@ import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import java.util.function.BiFunction;
import com.hazelcast.map.impl.ComputeEntryProcessor;
import com.hazelcast.map.AbstractEntryProcessor;
/**
* This class appends an item to a List<Long> stored in a hazelcast map.
@@ -17,11 +19,24 @@ import com.hazelcast.map.AbstractEntryProcessor;
* <b>Very important</b>: this class must implement Serializable interface because it is submitted to Hazelcast Cluster
* @author dtop
*/
public class AppendLongToListEntryProcessor extends AbstractEntryProcessor<String, List<Long>> implements Serializable {
public class AppendLongToListEntryProcessor extends ComputeEntryProcessor<String, List<Long>> implements Serializable {
private static final long serialVersionUID = -6960225265547599510L;
private long tsToAppend;
// private BiFunction<String, List<Long>, List<Long>> biFunction = (key, value) -> {
// if(value==null){
// value = new ArrayList<>();
// }
//
// // process and modify value
// if(!value.contains(tsToAppend)){
// value.add(tsToAppend);
// }
//
// return value;
// };
public AppendLongToListEntryProcessor() {
// for serialization
}
@@ -31,7 +46,7 @@ public class AppendLongToListEntryProcessor extends AbstractEntryProcessor<Strin
}
@Override
public Object process(Entry<String, List<Long>> entry) {
public List<Long> process(Entry<String, List<Long>> entry) {
List<Long> value = entry.getValue();
if(value==null){
@@ -44,6 +59,6 @@ public class AppendLongToListEntryProcessor extends AbstractEntryProcessor<Strin
}
entry.setValue(value);
return true;
return value;
}
}

View File

@@ -5,7 +5,7 @@ import java.util.HashSet;
import java.util.Map.Entry;
import java.util.Set;
import com.hazelcast.map.AbstractEntryProcessor;
import com.hazelcast.map.impl.ComputeEntryProcessor;
/**
* This class appends an item to a Set<String> stored in a hazelcast map.
@@ -17,7 +17,7 @@ import com.hazelcast.map.AbstractEntryProcessor;
* <b>Very important</b>: this class must implement Serializable interface because it is submitted to Hazelcast Cluster
* @author dtop
*/
public class AppendStringToSetEntryProcessor extends AbstractEntryProcessor<String, Set<String>> implements Serializable {
public class AppendStringToSetEntryProcessor extends ComputeEntryProcessor<String, Set<String>> implements Serializable {
private static final long serialVersionUID = -6960225265547599510L;
private String stringToAppend;
@@ -31,7 +31,7 @@ public class AppendStringToSetEntryProcessor extends AbstractEntryProcessor<Stri
}
@Override
public Object process(Entry<String, Set<String>> entry) {
public Set<String> process(Entry<String, Set<String>> entry) {
Set<String> value = entry.getValue();
if(value==null){
@@ -43,6 +43,6 @@ public class AppendStringToSetEntryProcessor extends AbstractEntryProcessor<Stri
entry.setValue(value);
return true;
return value;
}
}