[WIFI-2068] Change client session TTL to 24 hours

Author: Norm Traxler
Date: 2021-04-26 12:00:28 -04:00
parent 02d2e1cef4
commit dca76d8a6e
182 changed files with 173 additions and 1546 deletions

New .gitignore files, each containing the single line "/target/":

adoption-metrics-models/.gitignore
adoption-metrics-service/.gitignore
adoption-metrics-sp/.gitignore
alarm-datastore-cassandra/.gitignore
alarm-datastore-inmemory/.gitignore
alarm-datastore-interface/.gitignore
alarm-datastore-rdbms/.gitignore
alarm-models/.gitignore
alarm-service-interface/.gitignore
alarm-service-local/.gitignore
alarm-service-remote/.gitignore
alarm-service/.gitignore

Maven dependencies (file name not shown by the viewer)

@@ -278,18 +278,6 @@
<groupId>com.telecominfraproject.wlan</groupId>
<version>1.1.0-SNAPSHOT</version>
</dependency>
-<dependency>
-<artifactId>client-service</artifactId>
-<groupId>com.telecominfraproject.wlan</groupId>
-<version>1.1.0-SNAPSHOT</version>
-</dependency>
-<dependency>
-<artifactId>client-datastore-rdbms</artifactId>
-<groupId>com.telecominfraproject.wlan</groupId>
-<version>1.1.0-SNAPSHOT</version>
-</dependency>
<dependency>
<artifactId>routing-service</artifactId>

More new .gitignore files (single line "/target/"):

all-cloud-in-one-process/.gitignore
client-datastore-cassandra/.gitignore

Cassandra schema for client sessions (file name not shown by the viewer)

@@ -42,8 +42,8 @@ CREATE TABLE IF NOT EXISTS tip_wlan_keyspace.client_session (
details blob,
PRIMARY KEY ((customerId, macAddress), equipmentId)
-) WITH comment='Wireless client sessions used by TIP WLAN CloudSDK. Records automatically expire after 30 days'
-AND default_time_to_live = 2592000;
+) WITH comment='Wireless client sessions used by TIP WLAN CloudSDK. Records automatically expire after 24 hours'
+AND default_time_to_live = 86400;
CREATE INDEX IF NOT EXISTS idx_client_session_customerId ON tip_wlan_keyspace.client_session (customerId);
@@ -53,8 +53,8 @@ CREATE TABLE IF NOT EXISTS tip_wlan_keyspace.client_session_by_equipment (
macAddress bigint ,
PRIMARY KEY ((customerId, equipmentId), macAddress)
-) WITH comment='Index Table to look up wireless client sessions by equipmentId used by TIP WLAN CloudSDK. Records automatically expire after 30 days'
-AND default_time_to_live = 2592000;
+) WITH comment='Index Table to look up wireless client sessions by equipmentId used by TIP WLAN CloudSDK. Records automatically expire after 24 hours'
+AND default_time_to_live = 86400;
CREATE TABLE IF NOT EXISTS tip_wlan_keyspace.client_session_by_location (
customerId int,
@@ -63,8 +63,8 @@ CREATE TABLE IF NOT EXISTS tip_wlan_keyspace.client_session_by_location (
macAddress bigint,
PRIMARY KEY ((locationId), equipmentId, macAddress)
-) WITH comment='Index Table to look up wireless client sessions by location and equipment used by TIP WLAN CloudSDK. Records automatically expire after 30 days'
-AND default_time_to_live = 2592000;
+) WITH comment='Index Table to look up wireless client sessions by location and equipment used by TIP WLAN CloudSDK. Records automatically expire after 24 hours'
+AND default_time_to_live = 86400;
CREATE TABLE IF NOT EXISTS tip_wlan_keyspace.client_session_by_mac (
customerId int,
@@ -74,8 +74,8 @@ CREATE TABLE IF NOT EXISTS tip_wlan_keyspace.client_session_by_mac (
macAddressString text,
PRIMARY KEY ((macAddress), locationId, equipmentId, macAddressString)
-) WITH comment='Index Table to look up wireless client sessions by macAddressString, location, and equipment by TIP WLAN CloudSDK. Records automatically expire after 30 days'
-AND default_time_to_live = 2592000;
+) WITH comment='Index Table to look up wireless client sessions by macAddressString, location, and equipment by TIP WLAN CloudSDK. Records automatically expire after 24 hours'
+AND default_time_to_live = 86400;
CREATE TABLE IF NOT EXISTS tip_wlan_keyspace.client_session_by_mac_and_equipment (
customerId int,
@@ -85,8 +85,8 @@ CREATE TABLE IF NOT EXISTS tip_wlan_keyspace.client_session_by_mac_and_equipment
macAddressString text,
PRIMARY KEY ((macAddress), equipmentId, locationId, macAddressString)
-) WITH comment='Index Table to look up wireless client sessions by macAddressString, location, and equipment by TIP WLAN CloudSDK. Records automatically expire after 30 days'
-AND default_time_to_live = 2592000;
+) WITH comment='Index Table to look up wireless client sessions by macAddressString, location, and equipment by TIP WLAN CloudSDK. Records automatically expire after 24 hours'
+AND default_time_to_live = 86400;
DROP INDEX IF EXISTS tip_wlan_keyspace.idx_client_session_by_mac_macAddressString;
DROP INDEX IF EXISTS tip_wlan_keyspace.idx_client_session_by_mac_and_equipment_macAddressString;
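
Note on rollout: the schema file above only takes effect for keyspaces created from it. On a cluster that is already deployed, the same 24-hour TTL (86400 seconds = 24 * 60 * 60; the old value of 2592000 seconds is 30 days) would have to be applied with ALTER TABLE statements, and rows written before the change keep the TTL they were originally stored with. A minimal sketch using the DataStax Java driver 4.x; the contact point and datacenter name are placeholders, not values taken from this repository:

    import com.datastax.oss.driver.api.core.CqlSession;
    import java.net.InetSocketAddress;
    import java.util.Arrays;
    import java.util.List;

    public class ApplyClientSessionTtl {
        // 24 hours in seconds, matching the new default_time_to_live above
        private static final int TTL_SECONDS = 24 * 60 * 60; // 86400

        public static void main(String[] args) {
            List<String> tables = Arrays.asList(
                    "client_session",
                    "client_session_by_equipment",
                    "client_session_by_location",
                    "client_session_by_mac",
                    "client_session_by_mac_and_equipment");

            try (CqlSession session = CqlSession.builder()
                    .addContactPoint(new InetSocketAddress("127.0.0.1", 9042)) // placeholder contact point
                    .withLocalDatacenter("datacenter1")                        // placeholder datacenter name
                    .build()) {
                for (String table : tables) {
                    // Changes the table-level default TTL; rows already stored keep the TTL
                    // they were written with and still expire on the old 30-day schedule.
                    session.execute("ALTER TABLE tip_wlan_keyspace." + table
                            + " WITH default_time_to_live = " + TTL_SECONDS);
                }
            }
        }
    }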

More new .gitignore files (single line "/target/"):

client-datastore-inmemory/.gitignore
client-datastore-interface/.gitignore

Deleted: client-datastore-rdbms/pom.xml

@@ -1,43 +0,0 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.telecominfraproject.wlan</groupId>
<artifactId>tip-wlan-cloud-root-pom</artifactId>
<version>1.1.0-SNAPSHOT</version>
<relativePath>../../wlan-cloud-root</relativePath>
</parent>
<artifactId>client-datastore-rdbms</artifactId>
<name>client-datastore-rdbms</name>
<description>SQL implementation of the data store</description>
<dependencies>
<dependency>
<groupId>com.telecominfraproject.wlan</groupId>
<artifactId>base-jdbc</artifactId>
<version>1.1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.telecominfraproject.wlan</groupId>
<artifactId>client-datastore-interface</artifactId>
<version>1.1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.telecominfraproject.wlan</groupId>
<artifactId>base-jdbc-tests</artifactId>
<version>1.1.0-SNAPSHOT</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.telecominfraproject.wlan</groupId>
<artifactId>client-datastore-common-test</artifactId>
<version>1.1.0-SNAPSHOT</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>

Deleted: ClientDAO.java (client-datastore-rdbms)

@@ -1,493 +0,0 @@
package com.telecominfraproject.wlan.client.datastore.rdbms;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.PreparedStatementCreator;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import com.telecominfraproject.wlan.client.info.models.ClientInfoDetails;
import com.telecominfraproject.wlan.client.models.Client;
import com.telecominfraproject.wlan.core.model.equipment.MacAddress;
import com.telecominfraproject.wlan.core.model.pagination.ColumnAndSort;
import com.telecominfraproject.wlan.core.model.pagination.PaginationContext;
import com.telecominfraproject.wlan.core.model.pagination.PaginationResponse;
import com.telecominfraproject.wlan.core.model.pagination.SortOrder;
import com.telecominfraproject.wlan.core.server.jdbc.BaseJdbcDao;
import com.telecominfraproject.wlan.datastore.exceptions.DsConcurrentModificationException;
import com.telecominfraproject.wlan.datastore.exceptions.DsDuplicateEntityException;
import com.telecominfraproject.wlan.datastore.exceptions.DsEntityNotFoundException;
/**
* @author dtoptygin
*
*/
@Repository
@Transactional(propagation = Propagation.MANDATORY)
public class ClientDAO extends BaseJdbcDao {
private static final Logger LOG = LoggerFactory.getLogger(ClientDatastoreRdbms.class);
private static final String COL_ID = "macAddress";
private static final String[] GENERATED_KEY_COLS = { };
private static final String[] ALL_COLUMNS_LIST = {
COL_ID,
"macAddressString",
//TODO: add colums from properties Client in here
"customerId",
"details",
//make sure the order of properties matches this list and list in ClientRowMapper and list in create/update methods
"createdTimestamp",
"lastModifiedTimestamp"
};
private static final Set<String> columnsToSkipForInsert = new HashSet<>(Arrays.asList());
private static final Set<String> columnsToSkipForUpdate = new HashSet<>(Arrays.asList(COL_ID, "macAddressString", "createdTimestamp", "customerId"));
private static final String TABLE_NAME = "client";
private static final String TABLE_PREFIX = "c.";
private static final String ALL_COLUMNS;
private static final Set<String> ALL_COLUMNS_LOWERCASE = new HashSet<>();
@SuppressWarnings("unused")
//use this for queries where multiple tables are involved
private static final String ALL_COLUMNS_WITH_PREFIX;
private static final String ALL_COLUMNS_FOR_INSERT;
private static final String BIND_VARS_FOR_INSERT;
private static final String ALL_COLUMNS_UPDATE;
static{
StringBuilder strbAllColumns = new StringBuilder(1024);
StringBuilder strbAllColumnsWithPrefix = new StringBuilder(1024);
StringBuilder strbAllColumnsForInsert = new StringBuilder(1024);
StringBuilder strbBindVarsForInsert = new StringBuilder(128);
StringBuilder strbColumnsForUpdate = new StringBuilder(512);
for(String colName: ALL_COLUMNS_LIST){
ALL_COLUMNS_LOWERCASE.add(colName.toLowerCase());
strbAllColumns.append(colName).append(",");
strbAllColumnsWithPrefix.append(TABLE_PREFIX).append(colName).append(",");
if(!columnsToSkipForInsert.contains(colName)){
strbAllColumnsForInsert.append(colName).append(",");
strbBindVarsForInsert.append("?,");
}
if(!columnsToSkipForUpdate.contains(colName)){
strbColumnsForUpdate.append(colName).append("=?,");
}
}
// remove trailing ','
strbAllColumns.deleteCharAt(strbAllColumns.length() - 1);
strbAllColumnsWithPrefix.deleteCharAt(strbAllColumnsWithPrefix.length() - 1);
strbAllColumnsForInsert.deleteCharAt(strbAllColumnsForInsert.length() - 1);
strbBindVarsForInsert.deleteCharAt(strbBindVarsForInsert.length() - 1);
strbColumnsForUpdate.deleteCharAt(strbColumnsForUpdate.length() - 1);
ALL_COLUMNS = strbAllColumns.toString();
ALL_COLUMNS_WITH_PREFIX = strbAllColumnsWithPrefix.toString();
ALL_COLUMNS_FOR_INSERT = strbAllColumnsForInsert.toString();
BIND_VARS_FOR_INSERT = strbBindVarsForInsert.toString();
ALL_COLUMNS_UPDATE = strbColumnsForUpdate.toString();
}
private static final String SQL_GET_BY_ID =
"select " + ALL_COLUMNS +
" from "+TABLE_NAME+" " +
" where customerId = ? and " + COL_ID + " = ?";
private static final String SQL_GET_BY_CUSTOMER_ID =
"select " + ALL_COLUMNS +
" from " + TABLE_NAME + " " +
" where customerId = ? ";
private static final String SQL_GET_BLOCKED_CLIENTS = "select " + ALL_COLUMNS_WITH_PREFIX +
" from " + TABLE_NAME + " c , client_blocklist cb " +
" where cb.customerId = ? and c.customerId = cb.customerId and c.macAddress = cb.macAddress ";
private static final String SQL_GET_LASTMOD_BY_ID =
"select lastModifiedTimestamp " +
" from "+TABLE_NAME+" " +
" where customerId = ? and " + COL_ID + " = ?";
private static final String SQL_INSERT =
"insert into "+TABLE_NAME+" ( "
+ ALL_COLUMNS_FOR_INSERT
+ " ) values ( "+BIND_VARS_FOR_INSERT+" ) ";
private static final String SQL_DELETE =
"delete from "+TABLE_NAME+" where customerId = ? and " + COL_ID + " = ? ";
private static final String SQL_UPDATE =
"update "+TABLE_NAME+" set "
+ ALL_COLUMNS_UPDATE +
" where customerId = ? and " + COL_ID + " = ? "
+ " and ( lastModifiedTimestamp = ? or ? = true) " //last parameter will allow us to skip check for concurrent modification, if necessary
;
private static final String SQL_GET_ALL_IN_SET = "select " + ALL_COLUMNS + " from "+TABLE_NAME + " where customerId = ? and "+ COL_ID +" in ";
private static final String SQL_APPEND_SEARCH_MAC_SUBSTRING =
"and macAddressString like ? ";
private static final String SQL_PAGING_SUFFIX = " LIMIT ? OFFSET ? ";
private static final String SORT_SUFFIX = "";
private static final String SQL_INSERT_BLOCK_LIST = "insert into client_blocklist (customerId, macAddress) values (?, ?) ";
private static final String SQL_DELETE_BLOCK_LIST = "delete from client_blocklist where customerId = ? and macAddress = ? ";
private static final RowMapper<Client> clientRowMapper = new ClientRowMapper();
@Autowired(required=false)
public void setDataSource(ClientDataSourceInterface dataSource) {
setDataSource((DataSource)dataSource);
}
public Client create(final Client client) {
final long ts = System.currentTimeMillis();
try{
jdbcTemplate.update(
new PreparedStatementCreator() {
public PreparedStatement createPreparedStatement(Connection connection) throws SQLException {
PreparedStatement ps = connection.prepareStatement(SQL_INSERT );
int colIdx = 1;
//TODO: add remaining properties from Client here
ps.setLong(colIdx++, client.getMacAddress().getAddressAsLong());
ps.setString(colIdx++, client.getMacAddress().getAddressAsString());
ps.setInt(colIdx++, client.getCustomerId());
ps.setBytes(colIdx++, (client.getDetails()!=null)?client.getDetails().toZippedBytes():null);
ps.setLong(colIdx++, ts);
ps.setLong(colIdx++, ts);
return ps;
}
});
}catch (DuplicateKeyException e) {
throw new DsDuplicateEntityException(e);
}
//update blocked_client table, if needed
if((client.getDetails() instanceof ClientInfoDetails)
&& ((ClientInfoDetails)client.getDetails()).getBlocklistDetails()!=null
&& ((ClientInfoDetails)client.getDetails()).getBlocklistDetails().isEnabled()
) {
this.jdbcTemplate.update( SQL_INSERT_BLOCK_LIST,
client.getCustomerId(), client.getMacAddress().getAddressAsLong());
client.setNeedToUpdateBlocklist(true);
}
client.setCreatedTimestamp(ts);
client.setLastModifiedTimestamp(ts);
LOG.debug("Stored Client {}", client);
return client.clone();
}
@Transactional(noRollbackFor = { EmptyResultDataAccessException.class })
public Client getOrNull(int customerId, MacAddress clientMac) {
LOG.debug("Looking up Client for id {} {}", customerId, clientMac);
try{
Client client = this.jdbcTemplate.queryForObject(
SQL_GET_BY_ID,
clientRowMapper, customerId, clientMac.getAddressAsLong());
LOG.debug("Found Client {}", client);
return client;
}catch (EmptyResultDataAccessException e) {
LOG.debug("Could not find Client for id {} {}", customerId, clientMac);
return null;
}
}
public Client update(Client client) {
long newLastModifiedTs = System.currentTimeMillis();
long incomingLastModifiedTs = client.getLastModifiedTimestamp();
Client existingClient = getOrNull(client.getCustomerId(), client.getMacAddress());
if(existingClient==null) {
LOG.debug("Cannot find Client for {} {}", client.getCustomerId(), client.getMacAddress());
throw new DsEntityNotFoundException("Client not found " + client.getCustomerId() + " " + client.getMacAddress());
}
int updateCount = this.jdbcTemplate.update(SQL_UPDATE, new Object[]{
//TODO: add remaining properties from Client here
(client.getDetails()!=null)?client.getDetails().toZippedBytes():null ,
//client.getCreatedTimestamp(), - not updating this one
newLastModifiedTs,
// use id for update operation
client.getCustomerId(),
client.getMacAddress().getAddressAsLong(),
// use lastModifiedTimestamp for data protection against concurrent modifications
incomingLastModifiedTs,
isSkipCheckForConcurrentUpdates()
});
if(updateCount==0){
if(isSkipCheckForConcurrentUpdates()){
//in this case we did not request protection against concurrent updates,
//so the updateCount is 0 because record in db was not found
throw new EmptyResultDataAccessException(1);
}
long recordTimestamp = existingClient.getLastModifiedTimestamp();
LOG.debug("Concurrent modification detected for Client with id {} {} expected version is {} but version in db was {}",
client.getCustomerId(),
client.getMacAddress().getAddressAsLong(),
incomingLastModifiedTs,
recordTimestamp
);
throw new DsConcurrentModificationException("Concurrent modification detected for Client with id "
+ client.getCustomerId() + " " + client.getMacAddress()
+" expected version is " + incomingLastModifiedTs
+" but version in db was " + recordTimestamp
);
}
//update client_blocklist table, if the blocking state of the client has changed
boolean existingClientBlocked = (existingClient.getDetails() instanceof ClientInfoDetails)
&& ((ClientInfoDetails)existingClient.getDetails()).getBlocklistDetails()!=null
&& ((ClientInfoDetails)existingClient.getDetails()).getBlocklistDetails().isEnabled();
boolean updatedClientBlocked = (client.getDetails() instanceof ClientInfoDetails)
&& ((ClientInfoDetails)client.getDetails()).getBlocklistDetails()!=null
&& ((ClientInfoDetails)client.getDetails()).getBlocklistDetails().isEnabled();
if(existingClientBlocked != updatedClientBlocked) {
if(updatedClientBlocked) {
//insert record into client_blocklist table
this.jdbcTemplate.update( SQL_INSERT_BLOCK_LIST,
client.getCustomerId(), client.getMacAddress().getAddressAsLong());
} else {
//delete record from client_blocklist table
this.jdbcTemplate.update( SQL_DELETE_BLOCK_LIST,
client.getCustomerId(), client.getMacAddress().getAddressAsLong());
}
//notify the caller that block list needs to be updated
client.setNeedToUpdateBlocklist(true);
}
//make a copy so that we don't accidentally update caller's version by reference
Client clientCopy = client.clone();
clientCopy.setLastModifiedTimestamp(newLastModifiedTs);
LOG.debug("Updated Client {}", clientCopy);
return clientCopy;
}
public Client delete(int customerId, MacAddress clientMac) {
Client client = getOrNull(customerId, clientMac);
if(client!=null) {
this.jdbcTemplate.update(SQL_DELETE, customerId, clientMac.getAddressAsLong());
} else {
throw new DsEntityNotFoundException("Cannot find Client for id " + customerId + " " + clientMac);
}
//delete from client_blocklist table happens by foreign key cascade
//but we still need to tell the caller if the blocklist need to be updated
if((client.getDetails() instanceof ClientInfoDetails)
&& ((ClientInfoDetails)client.getDetails()).getBlocklistDetails()!=null
&& ((ClientInfoDetails)client.getDetails()).getBlocklistDetails().isEnabled()
) {
client.setNeedToUpdateBlocklist(true);
}
LOG.debug("Deleted Client {} {}", customerId, clientMac);
return client;
}
public List<Client> getAllForCustomer(int customerId) {
LOG.debug("Looking up Clients for customer {}", customerId);
List<Client> ret = this.jdbcTemplate.query(SQL_GET_BY_CUSTOMER_ID,
clientRowMapper, customerId);
LOG.debug("Found Clients for customer {} : {}", customerId, ret);
return ret;
}
public List<Client> get(int customerId, Set<MacAddress> clientMacSet) {
LOG.debug("calling get({}, {})", customerId, clientMacSet);
if (clientMacSet == null || clientMacSet.isEmpty()) {
return Collections.emptyList();
}
StringBuilder set = new StringBuilder(256);
set.append("(");
for(int i =0; i< clientMacSet.size(); i++) {
set.append("?,");
}
//remove last comma
set.deleteCharAt(set.length()-1);
set.append(")");
String query = SQL_GET_ALL_IN_SET + set;
ArrayList<Object> bindVars = new ArrayList<>();
bindVars.add(customerId);
clientMacSet.forEach(m -> bindVars.add(m.getAddressAsLong()) );
List<Client> results = this.jdbcTemplate.query(query, bindVars.toArray(), clientRowMapper);
LOG.debug("get({}, {}) returns {} record(s)", customerId, clientMacSet, results.size());
return results;
}
public PaginationResponse<Client> getForCustomer(int customerId, String macSubstring,
List<ColumnAndSort> sortBy, PaginationContext<Client> context) {
PaginationResponse<Client> ret = new PaginationResponse<>();
ret.setContext(context.clone());
if (ret.getContext().isLastPage()) {
// no more pages available according to the context
LOG.debug(
"No more pages available when looking up Clients for customer {} macSubstring {} with last returned page number {}",
customerId, macSubstring, context.getLastReturnedPageNumber());
return ret;
}
LOG.debug("Looking up Clients for customer {} macSubstring {} with last returned page number {}",
customerId, macSubstring, context.getLastReturnedPageNumber());
String query = SQL_GET_BY_CUSTOMER_ID;
// add filters for the query
ArrayList<Object> queryArgs = new ArrayList<>();
queryArgs.add(customerId);
if (macSubstring != null) {
query += SQL_APPEND_SEARCH_MAC_SUBSTRING;
queryArgs.add("%" + macSubstring.toLowerCase() + "%");
}
// add sorting options for the query
StringBuilder strbSort = new StringBuilder(100);
strbSort.append(" order by ");
if (sortBy != null && !sortBy.isEmpty()) {
// use supplied sorting options
for (ColumnAndSort column : sortBy) {
if (!ALL_COLUMNS_LOWERCASE.contains(column.getColumnName().toLowerCase())) {
// unknown column, skip it
continue;
}
strbSort.append(column.getColumnName());
if (column.getSortOrder() == SortOrder.desc) {
strbSort.append(" desc");
}
strbSort.append(",");
}
// remove last ','
strbSort.deleteCharAt(strbSort.length() - 1);
} else {
// no sort order was specified - sort by id to have consistent
// paging
strbSort.append(COL_ID);
}
query += strbSort.toString();
// add pagination parameters for the query
query += SQL_PAGING_SUFFIX ;
queryArgs.add(context.getMaxItemsPerPage());
queryArgs.add(context.getTotalItemsReturned());
/*
* https://www.citusdata.com/blog/2016/03/30/five-ways-to-paginate/
* Choosing offset=1000 makes cost about 19 and has a 0.609 ms execution
* time. Once offset=5,000,000 the cost goes up to 92734 and execution
* time is 758.484 ms. - DT: still acceptable for our use case
*/
List<Client> pageItems = this.jdbcTemplate.query(query, queryArgs.toArray(),
clientRowMapper);
LOG.debug("Found {} Clients for customer {} macSubstring {} with last returned page number {}",
pageItems.size(), customerId, macSubstring, context.getLastReturnedPageNumber());
ret.setItems(pageItems);
// adjust context for the next page
ret.prepareForNextPage();
// startAfterItem is not used in RDBMS datastores, set it to null
ret.getContext().setStartAfterItem(null);
return ret;
}
public List<Client> getBlockedClients(int customerId) {
LOG.debug("calling getBlockedClients({})", customerId);
List<Client> results = this.jdbcTemplate.query(SQL_GET_BLOCKED_CLIENTS, clientRowMapper, customerId);
LOG.debug("getBlockedClients({}) returns {} record(s)", customerId, results.size());
return results;
}
}
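
For reference, the paginated lookup that getForCustomer() builds from the SQL fragments above has the following shape when a MAC substring is supplied and no sort order is given; the bind parameters are, in order, customerId, the lower-cased substring wrapped in '%', maxItemsPerPage, and totalItemsReturned. This is an illustrative reconstruction, not code from the repository:

    // Illustrative: the statement assembled by getForCustomer() for a MAC-substring
    // search with the fallback sort order (COL_ID, i.e. macAddress).
    public class PaginatedClientQueryExample {
        public static void main(String[] args) {
            String query =
                "select macAddress, macAddressString, customerId, details, createdTimestamp, lastModifiedTimestamp"
              + " from client"
              + " where customerId = ?"
              + " and macAddressString like ?"  // bound as "%" + macSubstring.toLowerCase() + "%"
              + " order by macAddress"          // fallback sort keeps paging consistent across pages
              + " LIMIT ? OFFSET ?";            // maxItemsPerPage and totalItemsReturned from the PaginationContext
            System.out.println(query);
        }
    }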

Deleted: ClientDataSourceConfig.java (client-datastore-rdbms)

@@ -1,31 +0,0 @@
package com.telecominfraproject.wlan.client.datastore.rdbms;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Profile;
import org.springframework.context.annotation.PropertySource;
import org.springframework.stereotype.Component;
import com.telecominfraproject.wlan.core.server.jdbc.BaseDataSourceConfig;
/**
* @author dtoptygin
*
*/
@Component
@Profile("!use_single_ds")
@PropertySource({ "${client-ds.props:classpath:client-ds.properties}" })
public class ClientDataSourceConfig extends BaseDataSourceConfig {
@Bean
public ClientDataSourceInterface clientDataSourceInterface(){
ClientDataSourceInterface ret = new ClientDataSourceImpl(getDataSource(), getKeyColumnConverter());
return ret;
}
@Override
public String getDataSourceName() {
return "client-ds";
}
}

Deleted: ClientDataSourceImpl.java (client-datastore-rdbms)

@@ -1,18 +0,0 @@
package com.telecominfraproject.wlan.client.datastore.rdbms;
import javax.sql.DataSource;
import com.telecominfraproject.wlan.core.server.jdbc.BaseJDbcDataSource;
import com.telecominfraproject.wlan.core.server.jdbc.BaseKeyColumnConverter;
/**
* @author dtoptygin
* SQL DataSource that is used by Client Service
*/
public class ClientDataSourceImpl extends BaseJDbcDataSource implements ClientDataSourceInterface {
public ClientDataSourceImpl(DataSource targetDataSource, BaseKeyColumnConverter keyColumnConverter){
super(targetDataSource, keyColumnConverter);
}
}

Deleted: ClientDataSourceInterface.java (client-datastore-rdbms)

@@ -1,11 +0,0 @@
package com.telecominfraproject.wlan.client.datastore.rdbms;
import javax.sql.DataSource;
/**
* @author dtoptygin
* Marker interface to distinguish SQL DataSource that is used by Client Service
*/
public interface ClientDataSourceInterface extends DataSource {
}

Deleted: ClientDatastoreRdbms.java (client-datastore-rdbms)

@@ -1,101 +0,0 @@
package com.telecominfraproject.wlan.client.datastore.rdbms;
import java.util.List;
import java.util.Set;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import com.telecominfraproject.wlan.core.model.equipment.MacAddress;
import com.telecominfraproject.wlan.core.model.pagination.ColumnAndSort;
import com.telecominfraproject.wlan.core.model.pagination.PaginationContext;
import com.telecominfraproject.wlan.core.model.pagination.PaginationResponse;
import com.telecominfraproject.wlan.client.datastore.ClientDatastore;
import com.telecominfraproject.wlan.client.models.Client;
import com.telecominfraproject.wlan.client.session.models.ClientSession;
/**
* @author dtoptygin
*
*/
@Configuration
public class ClientDatastoreRdbms implements ClientDatastore {
@Autowired ClientDAO clientDAO;
@Autowired ClientSessionDAO clientSessionDAO;
@Override
public Client create(Client client) {
return clientDAO.create(client);
}
@Override
public Client getOrNull(int customerId, MacAddress clientMac) {
return clientDAO.getOrNull(customerId, clientMac);
}
@Override
public Client update(Client client) {
return clientDAO.update(client);
}
@Override
public Client delete(int customerId, MacAddress clientMac) {
return clientDAO.delete(customerId, clientMac);
}
@Override
public List<Client> get(int customerId, Set<MacAddress> clientMacSet) {
return clientDAO.get(customerId, clientMacSet);
}
@Override
public List<Client> getBlockedClients(int customerId) {
return clientDAO.getBlockedClients(customerId);
}
@Override
public PaginationResponse<Client> getForCustomer(int customerId, String macSubstring,
List<ColumnAndSort> sortBy, PaginationContext<Client> context) {
if(context == null) {
context = new PaginationContext<>();
}
return clientDAO.getForCustomer(customerId, macSubstring, sortBy, context);
}
@Override
public ClientSession getSessionOrNull(int customerId, long equipmentId, MacAddress clientMac) {
return clientSessionDAO.getSessionOrNull(customerId, equipmentId, clientMac);
}
@Override
public ClientSession updateSession(ClientSession clientSession) {
return clientSessionDAO.updateSession(clientSession);
}
@Override
public ClientSession deleteSession(int customerId, long equipmentId, MacAddress clientMac) {
return clientSessionDAO.deleteSession(customerId, equipmentId, clientMac);
}
@Override
public List<ClientSession> getSessions(int customerId, Set<MacAddress> clientMacSet) {
return clientSessionDAO.getSessions(customerId, clientMacSet);
}
@Override
public PaginationResponse<ClientSession> getSessionsForCustomer(int customerId, Set<Long> equipmentIds, Set<Long> locationIds,
String macSubstring, List<ColumnAndSort> sortBy, PaginationContext<ClientSession> context) {
if(context == null) {
context = new PaginationContext<>();
}
return clientSessionDAO.getSessionsForCustomer(customerId, equipmentIds, locationIds, macSubstring, sortBy, context);
}
}

Deleted: ClientRowMapper.java (client-datastore-rdbms)

@@ -1,51 +0,0 @@
package com.telecominfraproject.wlan.client.datastore.rdbms;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.core.RowMapper;
import com.telecominfraproject.wlan.core.model.equipment.MacAddress;
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
import com.telecominfraproject.wlan.client.models.Client;
import com.telecominfraproject.wlan.client.models.ClientDetails;
/**
* @author dtoptygin
*
*/
public class ClientRowMapper implements RowMapper<Client> {
private static final Logger LOG = LoggerFactory.getLogger(ClientRowMapper.class);
public Client mapRow(ResultSet rs, int rowNum) throws SQLException {
Client client = new Client();
int colIdx=1;
client.setMacAddress(new MacAddress(rs.getLong(colIdx++)));
// macAddressString here does not need to map again to Client Object
colIdx++;
//TODO: add columns from properties Client in here.
//make sure order of fields is the same as defined in Client
client.setCustomerId(rs.getInt(colIdx++));
byte[] zippedBytes = rs.getBytes(colIdx++);
if (zippedBytes !=null) {
try {
ClientDetails details = BaseJsonModel.fromZippedBytes(zippedBytes, ClientDetails.class);
client.setDetails(details);
} catch (RuntimeException exp) {
LOG.error("Failed to decode ClientDetails from database for id {} {}", client.getCustomerId(), client.getMacAddress());
}
}
client.setCreatedTimestamp(rs.getLong(colIdx++));
client.setLastModifiedTimestamp(rs.getLong(colIdx++));
return client;
}
}

Deleted: ClientSessionDAO.java (client-datastore-rdbms)

@@ -1,470 +0,0 @@
package com.telecominfraproject.wlan.client.datastore.rdbms;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.annotation.PostConstruct;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.dao.EmptyResultDataAccessException;
import org.springframework.jdbc.core.PreparedStatementCreator;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import com.telecominfraproject.wlan.client.session.models.ClientSession;
import com.telecominfraproject.wlan.core.model.equipment.MacAddress;
import com.telecominfraproject.wlan.core.model.pagination.ColumnAndSort;
import com.telecominfraproject.wlan.core.model.pagination.PaginationContext;
import com.telecominfraproject.wlan.core.model.pagination.PaginationResponse;
import com.telecominfraproject.wlan.core.model.pagination.SortOrder;
import com.telecominfraproject.wlan.core.server.jdbc.BaseJdbcDao;
import com.telecominfraproject.wlan.datastore.exceptions.DsConcurrentModificationException;
import com.telecominfraproject.wlan.datastore.exceptions.DsDuplicateEntityException;
import com.telecominfraproject.wlan.datastore.exceptions.DsEntityNotFoundException;
/**
* @author dtoptygin
*
*/
@Repository
@Transactional(propagation = Propagation.MANDATORY)
public class ClientSessionDAO extends BaseJdbcDao {
private static final Logger LOG = LoggerFactory.getLogger(ClientDatastoreRdbms.class);
private static final String COL_ID = "macAddress";
private static final String[] GENERATED_KEY_COLS = { };
private static final String[] ALL_COLUMNS_LIST = {
COL_ID,
"macAddressString",
//TODO: add colums from properties ClientSession in here
"customerId",
"equipmentId",
"locationId",
"details",
//make sure the order of properties matches this list and list in ClientSessionRowMapper and list in create/update methods
"lastModifiedTimestamp"
};
private static final Set<String> columnsToSkipForInsert = new HashSet<>(Arrays.asList());
private static final Set<String> columnsToSkipForUpdate = new HashSet<>(Arrays.asList(COL_ID, "macAddressString", "customerId", "equipmentId"));
private static final String TABLE_NAME = "client_session";
private static final String TABLE_PREFIX = "s.";
private static final String ALL_COLUMNS;
private static final Set<String> ALL_COLUMNS_LOWERCASE = new HashSet<>();
@SuppressWarnings("unused")
//use this for queries where multiple tables are involved
private static final String ALL_COLUMNS_WITH_PREFIX;
private static final String ALL_COLUMNS_FOR_INSERT;
private static final String BIND_VARS_FOR_INSERT;
private static final String ALL_COLUMNS_UPDATE;
static{
StringBuilder strbAllColumns = new StringBuilder(1024);
StringBuilder strbAllColumnsWithPrefix = new StringBuilder(1024);
StringBuilder strbAllColumnsForInsert = new StringBuilder(1024);
StringBuilder strbBindVarsForInsert = new StringBuilder(128);
StringBuilder strbColumnsForUpdate = new StringBuilder(512);
for(String colName: ALL_COLUMNS_LIST){
ALL_COLUMNS_LOWERCASE.add(colName.toLowerCase());
strbAllColumns.append(colName).append(",");
strbAllColumnsWithPrefix.append(TABLE_PREFIX).append(colName).append(",");
if(!columnsToSkipForInsert.contains(colName)){
strbAllColumnsForInsert.append(colName).append(",");
strbBindVarsForInsert.append("?,");
}
if(!columnsToSkipForUpdate.contains(colName)){
strbColumnsForUpdate.append(colName).append("=?,");
}
}
// remove trailing ','
strbAllColumns.deleteCharAt(strbAllColumns.length() - 1);
strbAllColumnsWithPrefix.deleteCharAt(strbAllColumnsWithPrefix.length() - 1);
strbAllColumnsForInsert.deleteCharAt(strbAllColumnsForInsert.length() - 1);
strbBindVarsForInsert.deleteCharAt(strbBindVarsForInsert.length() - 1);
strbColumnsForUpdate.deleteCharAt(strbColumnsForUpdate.length() - 1);
ALL_COLUMNS = strbAllColumns.toString();
ALL_COLUMNS_WITH_PREFIX = strbAllColumnsWithPrefix.toString();
ALL_COLUMNS_FOR_INSERT = strbAllColumnsForInsert.toString();
BIND_VARS_FOR_INSERT = strbBindVarsForInsert.toString();
ALL_COLUMNS_UPDATE = strbColumnsForUpdate.toString();
}
private static final String SQL_GET_BY_ID =
"select " + ALL_COLUMNS +
" from "+TABLE_NAME+" " +
" where customerId = ? and equipmentId = ? and " + COL_ID + " = ?";
private static final String SQL_GET_BY_CUSTOMER_ID =
"select " + ALL_COLUMNS +
" from " + TABLE_NAME + " " +
" where customerId = ? ";
private static final String SQL_GET_LASTMOD_BY_ID =
"select lastModifiedTimestamp " +
" from "+TABLE_NAME+" " +
" where customerId = ? and equipmentId = ? and " + COL_ID + " = ?";
private static final String SQL_INSERT =
"insert into "+TABLE_NAME+" ( "
+ ALL_COLUMNS_FOR_INSERT
+ " ) values ( "+BIND_VARS_FOR_INSERT+" ) ";
private static final String SQL_DELETE =
"delete from "+TABLE_NAME+" where customerId = ? and equipmentId = ? and " + COL_ID + " = ? ";
private static final String SQL_UPDATE =
"update "+TABLE_NAME+" set "
+ ALL_COLUMNS_UPDATE +
" where customerId = ? and equipmentId = ? and " + COL_ID + " = ? "
+ " and ( lastModifiedTimestamp = ? or ? = true) " //last parameter will allow us to skip check for concurrent modification, if necessary
;
private static final String SQL_GET_ALL_IN_SET = "select " + ALL_COLUMNS + " from "+TABLE_NAME + " where customerId = ? and "+ COL_ID +" in ";
private static final String SQL_APPEND_SEARCH_MAC_SUBSTRING =
"and macAddressString like ? ";
private static final String SQL_PAGING_SUFFIX = " LIMIT ? OFFSET ? ";
private static final String SORT_SUFFIX = "";
private static final RowMapper<ClientSession> clientSessionRowMapper = new ClientSessionRowMapper();
@Autowired(required=false)
public void setDataSource(ClientDataSourceInterface dataSource) {
setDataSource((DataSource)dataSource);
}
@PostConstruct
void afterPropertiesSet(){
setSkipCheckForConcurrentUpdates(true);
}
public ClientSession create(final ClientSession clientSession) {
final long ts = System.currentTimeMillis();
try{
jdbcTemplate.update(
new PreparedStatementCreator() {
public PreparedStatement createPreparedStatement(Connection connection) throws SQLException {
PreparedStatement ps = connection.prepareStatement(SQL_INSERT );
int colIdx = 1;
//TODO: add remaining properties from Client here
ps.setLong(colIdx++, clientSession.getMacAddress().getAddressAsLong());
ps.setString(colIdx++, clientSession.getMacAddress().getAddressAsString());
ps.setInt(colIdx++, clientSession.getCustomerId());
ps.setLong(colIdx++, clientSession.getEquipmentId());
ps.setLong(colIdx++, clientSession.getLocationId());
ps.setBytes(colIdx++, (clientSession.getDetails()!=null)?clientSession.getDetails().toZippedBytes():null);
ps.setLong(colIdx++, ts);
return ps;
}
});
}catch (DuplicateKeyException e) {
throw new DsDuplicateEntityException(e);
}
clientSession.setLastModifiedTimestamp(ts);
LOG.debug("Stored Client session {}", clientSession);
return clientSession.clone();
}
@Transactional(noRollbackFor = { EmptyResultDataAccessException.class })
public ClientSession getSessionOrNull(int customerId, long equipmentId, MacAddress clientMac) {
LOG.debug("Looking up Client session for id {} {} {}", customerId, equipmentId, clientMac);
try{
ClientSession client = this.jdbcTemplate.queryForObject(
SQL_GET_BY_ID,
clientSessionRowMapper, customerId, equipmentId, clientMac.getAddressAsLong());
LOG.debug("Found Client session {}", client);
return client;
}catch (EmptyResultDataAccessException e) {
LOG.debug("Could not find Client for id {} {}", customerId, clientMac);
return null;
}
}
public ClientSession updateSession(ClientSession clientSession) {
long newLastModifiedTs = System.currentTimeMillis();
long incomingLastModifiedTs = clientSession.getLastModifiedTimestamp();
int updateCount = this.jdbcTemplate.update(SQL_UPDATE, new Object[]{
//TODO: add remaining properties from Client session here
clientSession.getLocationId(),
(clientSession.getDetails()!=null)?clientSession.getDetails().toZippedBytes():null ,
newLastModifiedTs,
// use id for update operation
clientSession.getCustomerId(),
clientSession.getEquipmentId(),
clientSession.getMacAddress().getAddressAsLong(),
// use lastModifiedTimestamp for data protection against concurrent modifications
incomingLastModifiedTs,
isSkipCheckForConcurrentUpdates()
});
if(updateCount==0){
try{
//find out if record could not be updated because it does not exist or because it was modified concurrently
long recordTimestamp = this.jdbcTemplate.queryForObject(
SQL_GET_LASTMOD_BY_ID,
Long.class,
clientSession.getCustomerId(), clientSession.getEquipmentId(), clientSession.getMacAddress().getAddressAsLong()
);
if(!isSkipCheckForConcurrentUpdates()){
LOG.debug("Concurrent modification detected for Client session with id {}:{}:{} expected version is {} but version in db was {}",
clientSession.getCustomerId(), clientSession.getEquipmentId(), clientSession.getMacAddress(),
incomingLastModifiedTs,
recordTimestamp
);
throw new DsConcurrentModificationException("Concurrent modification detected for Client session with id "
+ clientSession.getCustomerId() + ":" + clientSession.getEquipmentId() + ":" + clientSession.getMacAddress()
+" expected version is " + incomingLastModifiedTs
+" but version in db was " + recordTimestamp
);
}
}catch (EmptyResultDataAccessException e) {
LOG.debug("Could not find Client for id {}:{}:{}", clientSession.getCustomerId(), clientSession.getEquipmentId(), clientSession.getMacAddress());
if(isSkipCheckForConcurrentUpdates()){
//in this case we did not request protection against concurrent updates,
//so the updateCount is 0 because record in db was not found
//we'll create a new record
return create(clientSession);
} else {
throw new DsEntityNotFoundException("Client session not found " + clientSession.getCustomerId() + ":"
+ clientSession.getEquipmentId() + ":" + clientSession.getMacAddress());
}
}
}
//make a copy so that we don't accidentally update caller's version by reference
ClientSession clientSessionCopy = clientSession.clone();
clientSessionCopy.setLastModifiedTimestamp(newLastModifiedTs);
LOG.debug("Updated Client session {}", clientSessionCopy);
return clientSessionCopy;
}
public ClientSession deleteSession(int customerId, long equipmentId, MacAddress clientMac) {
ClientSession client = getSessionOrNull(customerId, equipmentId, clientMac);
if(client!=null) {
this.jdbcTemplate.update(SQL_DELETE, customerId, equipmentId, clientMac.getAddressAsLong());
} else {
throw new DsEntityNotFoundException("Cannot find Client for id " + customerId + " " + equipmentId+ " " + clientMac);
}
LOG.debug("Deleted Client session {} {} {}", customerId, equipmentId, clientMac);
return client;
}
public List<ClientSession> getSessions(int customerId, Set<MacAddress> clientMacSet) {
LOG.debug("calling getSessions({}, {})", customerId, clientMacSet);
if (clientMacSet == null || clientMacSet.isEmpty()) {
return Collections.emptyList();
}
StringBuilder set = new StringBuilder(256);
set.append("(");
for(int i =0; i< clientMacSet.size(); i++) {
set.append("?,");
}
//remove last comma
set.deleteCharAt(set.length()-1);
set.append(")");
String query = SQL_GET_ALL_IN_SET + set;
ArrayList<Object> bindVars = new ArrayList<>();
bindVars.add(customerId);
clientMacSet.forEach(m -> bindVars.add(m.getAddressAsLong()) );
List<ClientSession> results = this.jdbcTemplate.query(query, bindVars.toArray(), clientSessionRowMapper);
LOG.debug("getSessions({}, {}) returns {} record(s)", customerId, clientMacSet, results.size());
return results;
}
public PaginationResponse<ClientSession> getSessionsForCustomer(int customerId, Set<Long> equipmentIds, Set<Long> locationIds,
String macSubstring, List<ColumnAndSort> sortBy, PaginationContext<ClientSession> context) {
PaginationResponse<ClientSession> ret = new PaginationResponse<>();
ret.setContext(context.clone());
if (ret.getContext().isLastPage()) {
// no more pages available according to the context
LOG.debug(
"No more pages available when looking up Client sessions for customer {} with last returned page number {}",
customerId, context.getLastReturnedPageNumber());
return ret;
}
LOG.debug("Looking up Client sessions for customer {} with last returned page number {}",
customerId, context.getLastReturnedPageNumber());
String query = SQL_GET_BY_CUSTOMER_ID;
// add filters for the query
ArrayList<Object> queryArgs = new ArrayList<>();
queryArgs.add(customerId);
//add equipmentId filters
if (equipmentIds != null && !equipmentIds.isEmpty()) {
queryArgs.addAll(equipmentIds);
StringBuilder strb = new StringBuilder(100);
strb.append("and equipmentId in (");
for (int i = 0; i < equipmentIds.size(); i++) {
strb.append("?");
if (i < equipmentIds.size() - 1) {
strb.append(",");
}
}
strb.append(") ");
query += strb.toString();
}
//add locationId filters
if (locationIds != null && !locationIds.isEmpty()) {
queryArgs.addAll(locationIds);
StringBuilder strb = new StringBuilder(100);
strb.append("and locationId in (");
for (int i = 0; i < locationIds.size(); i++) {
strb.append("?");
if (i < locationIds.size() - 1) {
strb.append(",");
}
}
strb.append(") ");
query += strb.toString();
}
//add macSubstring filter
if (macSubstring != null && !macSubstring.isEmpty()) {
query += SQL_APPEND_SEARCH_MAC_SUBSTRING;
queryArgs.add("%" + macSubstring.toLowerCase() + "%");
}
// add sorting options for the query
StringBuilder strbSort = new StringBuilder(100);
strbSort.append(" order by ");
if (sortBy != null && !sortBy.isEmpty()) {
// use supplied sorting options
for (ColumnAndSort column : sortBy) {
if (!ALL_COLUMNS_LOWERCASE.contains(column.getColumnName().toLowerCase())) {
// unknown column, skip it
continue;
}
strbSort.append(column.getColumnName());
if (column.getSortOrder() == SortOrder.desc) {
strbSort.append(" desc");
}
strbSort.append(",");
}
// remove last ','
strbSort.deleteCharAt(strbSort.length() - 1);
} else {
// no sort order was specified - sort by id to have consistent
// paging
strbSort.append(COL_ID);
}
query += strbSort.toString();
// add pagination parameters for the query
query += SQL_PAGING_SUFFIX ;
queryArgs.add(context.getMaxItemsPerPage());
queryArgs.add(context.getTotalItemsReturned());
/*
* https://www.citusdata.com/blog/2016/03/30/five-ways-to-paginate/
* Choosing offset=1000 makes cost about 19 and has a 0.609 ms execution
* time. Once offset=5,000,000 the cost goes up to 92734 and execution
* time is 758.484 ms. - DT: still acceptable for our use case
*/
List<ClientSession> pageItems = this.jdbcTemplate.query(query, queryArgs.toArray(),
clientSessionRowMapper);
if (pageItems == null) {
LOG.debug("Cannot find Client sessions for customer {} with last returned page number {}",
customerId, context.getLastReturnedPageNumber());
} else {
LOG.debug("Found {} Client sessions for customer {} with last returned page number {}",
pageItems.size(), customerId, context.getLastReturnedPageNumber());
}
ret.setItems(pageItems);
// adjust context for the next page
ret.prepareForNextPage();
// startAfterItem is not used in RDBMS datastores, set it to null
ret.getContext().setStartAfterItem(null);
return ret;
}
}
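
Because afterPropertiesSet() turns on skipCheckForConcurrentUpdates for this DAO, updateSession() doubles as an upsert: when no matching row exists, the EmptyResultDataAccessException branch falls through to create(). A hedged sketch of the resulting call pattern; the ids and MAC address are made up, clientSessionDAO is assumed to be injected, and the call must run inside a transaction because the DAO is declared with Propagation.MANDATORY:

    // Sketch only: the first call inserts the session because no row exists yet,
    // the second call updates the same row in place.
    @Transactional
    public ClientSession recordSessionSketch(ClientSessionDAO clientSessionDAO) {
        ClientSession session = new ClientSession();
        session.setCustomerId(1);                  // made-up customer id
        session.setEquipmentId(42L);               // made-up equipment id
        session.setLocationId(7L);                 // made-up location id
        session.setMacAddress(new MacAddress(1L)); // made-up client MAC

        ClientSession stored = clientSessionDAO.updateSession(session); // no row yet -> falls back to create()
        stored.setLocationId(8L);                                       // e.g. the client moved to another location
        return clientSessionDAO.updateSession(stored);                  // row exists now -> plain update
    }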

Deleted: ClientSessionRowMapper.java (client-datastore-rdbms)

@@ -1,54 +0,0 @@
package com.telecominfraproject.wlan.client.datastore.rdbms;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.core.RowMapper;
import com.telecominfraproject.wlan.core.model.equipment.MacAddress;
import com.telecominfraproject.wlan.core.model.json.BaseJsonModel;
import com.telecominfraproject.wlan.client.models.Client;
import com.telecominfraproject.wlan.client.models.ClientDetails;
import com.telecominfraproject.wlan.client.session.models.ClientSession;
import com.telecominfraproject.wlan.client.session.models.ClientSessionDetails;
/**
* @author dtoptygin
*
*/
public class ClientSessionRowMapper implements RowMapper<ClientSession> {
private static final Logger LOG = LoggerFactory.getLogger(ClientSessionRowMapper.class);
public ClientSession mapRow(ResultSet rs, int rowNum) throws SQLException {
ClientSession clientSession = new ClientSession();
int colIdx=1;
clientSession.setMacAddress(new MacAddress(rs.getLong(colIdx++)));
// macAddressString here does not need to map again to ClientSession Object
colIdx++;
//TODO: add columns from properties ClientSession in here.
//make sure order of fields is the same as defined in ClientSession
clientSession.setCustomerId(rs.getInt(colIdx++));
clientSession.setEquipmentId(rs.getLong(colIdx++));
clientSession.setLocationId(rs.getLong(colIdx++));
byte[] zippedBytes = rs.getBytes(colIdx++);
if (zippedBytes !=null) {
try {
ClientSessionDetails details = BaseJsonModel.fromZippedBytes(zippedBytes, ClientSessionDetails.class);
clientSession.setDetails(details);
} catch (RuntimeException exp) {
LOG.error("Failed to decode ClientSessionDetails from database for id {} {} {}", clientSession.getCustomerId(), clientSession.getEquipmentId(), clientSession.getMacAddress());
}
}
clientSession.setLastModifiedTimestamp(rs.getLong(colIdx++));
return clientSession;
}
}

Deleted: PostgreSQL schema for the client tables (file name not shown)

@@ -1,50 +0,0 @@
create table if not exists client (
-- postgresql
macAddress bigint ,
macAddressString varchar(100) ,
customerId int,
details bytea,
createdTimestamp bigint not null,
lastModifiedTimestamp bigint not null,
primary key (customerId, macAddress)
);
alter table client add column if not exists macAddressString varchar(100);
create index if not exists idx_client_customerId on client (customerId);
create index if not exists idx_client_customerId_macAddressString on client (customerId, macAddressString);
create table if not exists client_blocklist (
-- postgresql
customerId int,
macAddress bigint ,
primary key (customerId, macAddress),
FOREIGN KEY (customerId, macAddress) REFERENCES client(customerId, macAddress) ON DELETE CASCADE
);
create table if not exists client_session (
-- postgresql
macAddress bigint ,
macAddressString varchar(100) ,
customerId int,
equipmentId bigint,
locationId bigint,
details bytea,
lastModifiedTimestamp bigint not null,
primary key (customerId, equipmentId, macAddress)
);
alter table client_session add column if not exists macAddressString varchar(100);
create index if not exists idx_clientSession_customerId on client_session (customerId);
create index if not exists idx_clientSession_locationId on client_session (customerId, locationId);
create index if not exists idx_clientSession_customerId_macAddressString on client_session (customerId, macAddressString);

Deleted: ClientDatastoreRdbmsPlumbingTests.java (client-datastore-rdbms)

@@ -1,68 +0,0 @@
package com.telecominfraproject.wlan.client.datastore.rdbms;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import org.junit.Before;
import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Import;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.embedded.EmbeddedDatabase;
import com.telecominfraproject.wlan.client.models.Client;
import com.telecominfraproject.wlan.core.model.equipment.MacAddress;
import com.telecominfraproject.wlan.core.server.jdbc.test.BaseJdbcTest;
import com.telecominfraproject.wlan.core.server.jdbc.test.TestWithEmbeddedDB;
/**
* @author dtoptygin
*
*/
@Import(value = {
ClientDatastoreRdbms.class,
ClientDataSourceConfig.class,
ClientDAO.class,
ClientSessionDAO.class,
BaseJdbcTest.Config.class
})
@TestWithEmbeddedDB
public class ClientDatastoreRdbmsPlumbingTests extends BaseJdbcTest {
@Autowired(required=false) private EmbeddedDatabase db;
@Autowired private ClientDatastoreRdbms clientDatastore;
@Autowired private ClientDAO clientDAO;
@Before
public void setUp() {
}
@Test
public void testDataAccess() {
if(db!=null){
//this is a simple test to see if embedded db is working in test environment
JdbcTemplate jdbcTemplate = new JdbcTemplate(db);
Long ret = jdbcTemplate.queryForObject(
"select macAddress from client where customerId = ? and macAddress = ?",
Long.class, 1, 1);
assertEquals((Long)1L, ret);
}
}
@Test
public void testCreateUpdateDeleteClient() {
//GET by Id test
Client ret = clientDatastore.getOrNull(1, new MacAddress(1L));
//DELETE Test
clientDAO.delete(ret.getCustomerId(), ret.getMacAddress());
assertNull(clientDatastore.getOrNull(ret.getCustomerId(), ret.getMacAddress()));
}
}

Deleted: ClientDatastoreRdbmsTests.java (client-datastore-rdbms)

@@ -1,29 +0,0 @@
package com.telecominfraproject.wlan.client.datastore.rdbms;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.context.SpringBootTest.WebEnvironment;
import org.springframework.context.annotation.Import;
import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.transaction.annotation.Transactional;
import com.telecominfraproject.wlan.core.server.jdbc.test.BaseJdbcTest;
import com.telecominfraproject.wlan.core.server.jdbc.test.TestWithEmbeddedDB;
import com.telecominfraproject.wlan.client.datastore.BaseClientDatastoreTest;
/**
* @author dtoptygin
*
*/
@RunWith(SpringRunner.class)
@SpringBootTest(webEnvironment = WebEnvironment.NONE, classes = BaseJdbcTest.Config.class)
@Rollback(value = true)
@Transactional
@Import(value = { ClientDatastoreRdbms.class, ClientDataSourceConfig.class,
ClientDAO.class, ClientSessionDAO.class, BaseJdbcTest.Config.class })
@TestWithEmbeddedDB
public class ClientDatastoreRdbmsTests extends BaseClientDatastoreTest {
}

Deleted: Logback configuration (file name not shown)

@@ -1,41 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<conversionRule conversionWord="filteredStack"
converterClass="com.telecominfraproject.wlan.server.exceptions.logback.ExceptionCompressingConverter" />
<appender name="stdout" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n%filteredStack%nopex</pattern>
<!-- See http://logback.qos.ch/manual/layouts.html for details -->
<!-- %ex{5} - add at the end to display only 5 levels of the exception stack trace -->
<!-- %nopex - add at the end to not display any of the exception stack traces -->
<!-- %ex{full} - add at the end to display all the levels of the exception stack trace -->
</encoder>
</appender>
<!--
details: http://logback.qos.ch/manual/configuration.html#auto_configuration
runtime configuration, if need to override the defaults:
-Dlogging.config=file:///home/ec2-user/opensync/logback.xml
for log configuration debugging - use
-Dlogback.statusListenerClass=ch.qos.logback.core.status.OnConsoleStatusListener
log levels:
OFF ERROR WARN INFO DEBUG TRACE
-->
<logger name="org.apache.catalina.startup.DigesterFactory" level="ERROR"/>
<logger name="org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerMapping" level="INFO"/>
<logger name="org.springframework.boot.context.embedded.tomcat.TomcatEmbeddedServletContainer" level="INFO"/>
<logger name="org.springframework.security.web.authentication.preauth" level="OFF"/>
<logger name="com.netflix.servo.tag.aws.AwsInjectableTag" level="OFF"/>
<logger name="com.telecominfraproject" level="WARN"/>
<root level="WARN">
<appender-ref ref="stdout"/>
</root>
</configuration>

Deleted: HSQLDB test schema for the client tables (file name not shown)

@@ -1,49 +0,0 @@
drop table client if exists;
drop table client_session if exists;
create table client (
-- hsqldb
macAddress bigint,
macAddressString varchar(100),
customerId int,
details varbinary(65535),
createdTimestamp bigint not null,
lastModifiedTimestamp bigint not null,
primary key (customerId, macAddress)
);
create index idx_client_customerId on client (customerId);
create index if not exists idx_client_customerId_macAddressString on client (customerId, macAddressString);
create table if not exists client_blocklist (
-- hsqldb
customerId int,
macAddress bigint ,
primary key (customerId, macAddress),
FOREIGN KEY (customerId, macAddress) REFERENCES client(customerId, macAddress) ON DELETE CASCADE
);
create table client_session (
-- postgresql
macAddress bigint ,
macAddressString varchar(100),
customerId int,
equipmentId bigint,
locationId bigint,
details varbinary(65535),
lastModifiedTimestamp bigint not null,
primary key (customerId, equipmentId, macAddress)
);
create index idx_clientSession_customerId on client_session (customerId);
create index idx_clientSession_locationId on client_session (customerId, locationId);
create index if not exists idx_clientSession_customerId_macAddressString on client_session (customerId, macAddressString);

Deleted: test data for the client table (file name not shown)

@@ -1,14 +0,0 @@
insert into client (
macAddress,
macAddressString,
customerId,
details,
createdTimestamp,
lastModifiedTimestamp
) values (
1,
'1',
1,
null,
0,0
);

More new .gitignore files (single line "/target/"):

client-models/.gitignore
client-service-interface/.gitignore
client-service-local/.gitignore
client-service-remote/.gitignore
client-service/.gitignore
cloud-event-dispatcher/.gitignore

Maven module list (file name not shown by the viewer)

@@ -136,7 +136,6 @@
<module>../client-datastore-common-test</module>
<module>../client-datastore-inmemory</module>
<module>../client-datastore-interface</module>
-<module>../client-datastore-rdbms</module>
<module>../client-datastore-cassandra</module>
<module>../client-models</module>
<module>../client-service</module>

More new .gitignore files (single line "/target/"):

customer-datastore-rdbms/.gitignore
customer-models/.gitignore
customer-service-interface/.gitignore
customer-service-local/.gitignore
customer-service-remote/.gitignore
customer-service/.gitignore
dashboard-sp/.gitignore
equipment-alarms-sp/.gitignore
equipment-datastore-rdbms/.gitignore
equipment-gateway-models/.gitignore
equipment-models/.gitignore
equipment-service-local/.gitignore
equipment-service-remote/.gitignore
equipment-service/.gitignore
filestore-service/.gitignore
firmware-datastore-rdbms/.gitignore
firmware-models/.gitignore
firmware-service-interface/.gitignore
firmware-service-local/.gitignore
firmware-service-remote/.gitignore
firmware-service/.gitignore
kafka-streams-consumer/.gitignore
kafka-streams/.gitignore
location-datastore-rdbms/.gitignore
location-models/.gitignore
location-service-interface/.gitignore
location-service-local/.gitignore

Some files were not shown because too many files have changed in this diff.