Mirror of https://github.com/Telecominfraproject/oopt-gnpy.git
Synced 2025-11-01 18:47:48 +00:00

Merge pull request #5 from dutc/fix-consolidated-cleanup

Consolidated cleanup

3  .gitignore (vendored)
@@ -60,3 +60,6 @@ target/

# pyenv python configuration file
.python-version

# MacOS DS_store
.DS_Store
5  Makefile

@@ -43,7 +43,6 @@ clean-pyc: ## remove Python file artifacts
	find . -name '__pycache__' -exec rm -fr {} +

clean-test: ## remove test and coverage artifacts
	rm -fr .tox/
	rm -f .coverage
	rm -fr htmlcov/

@@ -52,10 +51,6 @@ lint: ## check style with flake8

test: ## run tests quickly with the default Python
	py.test

test-all: ## run tests on every Python version with tox
	tox

coverage: ## check code coverage quickly with the default Python
	coverage run --source gnpy -m pytest
@@ -4,4 +4,7 @@ from .gnpy import (raised_cosine_comb, analytic_formula, compute_psi, fwm_eff,
                   get_f_computed_interp, get_freqarray, gn_analytic, gn_model,
                   interpolate_in_range, GN_integral)

__all__ = ['gnpy']
from .constants import (pi, c, h)
from .network_elements import (Network, Tx, Rx, Fiber, Edfa)

__all__ = ['gnpy', 'constants', 'network_elements']
17  gnpy/cli.py (deleted)

@@ -1,17 +0,0 @@
# -*- coding: utf-8 -*-

"""Console script for gnpy."""

import click


@click.command()
def main(args=None):
    """Console script for gnpy."""
    click.echo("Replace this message by putting your code into "
               "gnpy.cli.main")
    click.echo("See click documentation at http://click.pocoo.org/")


if __name__ == "__main__":
    main()

1  gnpy/configuration/__init__.py (new file)

@@ -0,0 +1 @@
32  gnpy/configuration/fiber_parameters.py (new file)

@@ -0,0 +1,32 @@
# coding=utf-8
""" fiber_parameters.py describes the fiber parameters.
fibers is a dictionary containing a dictionary for each kind of fiber.
Each dictionary has to report:
    reference_frequency: the frequency at which the parameters are evaluated [THz]
    alpha: the attenuation coefficient [dB/km]
    alpha_1st: the first derivative of alpha, indicating the alpha slope [dB/km/THz]
        if you assume flat attenuation with respect to frequency, set it to zero
    beta_2: the dispersion coefficient [ps^2/km]
    n_2: second-order nonlinear refractive index [m^2/W]
        a typical value is 2.5E-20 m^2/W
    a_eff: the effective area of the fiber [um^2]
"""

fibers = {
    'SMF': {
        'reference_frequency': 193.5,
        'alpha': 0.2,
        'alpha_1st': 0,
        'beta_2': 21.27,
        'n_2': 2.5E-20,
        'a_eff': 77.77,
    },
    'NZDF': {
        'reference_frequency': 193.5,
        'alpha': 0.22,
        'alpha_1st': 0,
        'beta_2': 21,
        'n_2': 2.5E-20,
        'a_eff': 70,
    }
}
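The values above are tabulated in engineering units (dB/km for alpha, um^2 for a_eff). Elsewhere in this diff (gn_analytic in gnpy/gnpy.py) the loss is converted to linear field units as a_db / 20 / log10(e). Below is a minimal sketch of those conversions, assuming the import path added by this commit; the helper name to_linear_units is illustrative only, not part of the codebase.

import numpy as np

from gnpy.configuration.fiber_parameters import fibers  # module added in this commit

def to_linear_units(fiber):
    # Illustrative helper: convert tabulated values to the units used by the GN code.
    alpha_db = fiber['alpha']                     # dB/km
    alpha_lin = alpha_db / 20.0 / np.log10(np.e)  # 1/km (field convention, as in gn_analytic)
    a_eff_m2 = fiber['a_eff'] * 1e-12             # um^2 -> m^2
    return alpha_lin, a_eff_m2

alpha_lin, a_eff = to_linear_units(fibers['SMF'])
print(alpha_lin, a_eff)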
40  gnpy/configuration/general_parameters.py (new file)

@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*
"""general_parameters.py contains the general configuration settings

The settings are subdivided into two dictionaries:
    sys_param: a dictionary containing the general system parameters:
        f0: the starting frequency of the laser grid used to describe the WDM system [THz]
        ns: the number of 6.25 GHz slots in the grid

    control_param:
        save_each_comp: a boolean flag. If true, it saves in the output folder one spectrum file at the output of each
            component; otherwise it saves just the last spectrum
        is_linear: a boolean flag. If true, it doesn't compute NLI; if false, OLE will consider NLI
        is_analytic: a boolean flag. If true, the NLI is computed through the analytic formula; otherwise it uses
            the double integral. Warning: the double integral is very slow.
        points_not_interp: if the double integral is used, it indicates how many points are calculated; the others will
            be interpolated
        kind_interp: a string indicating the interpolation method for the double integral
        th_fwm: the threshold of the four wave mixing efficiency for the double integral
        n_points: number of points in which the double integral is computed in the high FWM efficiency region
        n_points_min: number of points in which the double integral is computed in the low FWM efficiency region
        n_cores: number of cores for parallel computation [not yet implemented]
"""
# System parameters
sys_param = {
    'f0': 192.075,
    'ns': 328
}

# control parameters
control_param = {
    'save_each_comp': True,
    'is_linear': False,
    'is_analytic': True,
    'points_not_interp': 2,
    'kind_interp': 'linear',
    'th_fwm': 50,
    'n_points': 500,
    'n_points_min': 4,
    'n_cores': 1
}
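The f0/ns pair above defines a 6.25 GHz slot grid; get_frequencies_wdm further down in this diff derives the comb centre as f0 + (ns // 2) * 0.00625 THz. A minimal, standalone sketch of that arithmetic (plain Python, no gnpy imports), using the sys_param values above:

delta_f = 6.25e-3                  # slot width [THz]
f0, ns = 192.075, 328              # sys_param values from above

f_cent = f0 + (ns // 2) * delta_f  # centre of the grid: 192.075 + 164 * 0.00625 = 193.1 THz
slot_freqs = [f0 + k * delta_f for k in range(ns)]

print(f_cent, slot_freqs[0], slot_freqs[-1])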
59  gnpy/configuration/link_description.py (new file)

@@ -0,0 +1,59 @@
# coding=utf-8
""" link_description.py contains the full description of the link that OLE has to emulate.
It contains a list of dictionaries following the structure of the link; each element of the list describes one
component.

    'comp_cat': the kind of link component:
        PC: a passive component defined by a loss at a certain frequency and a loss tilt
        OA: an optical amplifier defined by a gain at a certain frequency, a gain tilt and a noise figure
        fiber: a span of fiber described by the type and the length
    'comp_id': an id identifying the component. It has to be unique for each component!

    extra fields for PC:
        'ref_freq': the frequency at which the 'loss' parameter is evaluated [THz]
        'loss': the loss at the frequency 'ref_freq' [dB]
        'loss_tlt': the frequency dependent loss [dB/THz]
    extra fields for OA:
        'ref_freq': the frequency at which the 'gain' parameter is evaluated [THz]
        'gain': the gain at the frequency 'ref_freq' [dB]
        'gain_tlt': the frequency dependent gain [dB/THz]
        'noise_figure': the noise figure of the optical amplifier [dB]
    extra fields for fiber:
        'fiber_type': a string naming the type of fiber described in the file fiber_parameters.py
        'length': the fiber length [km]

"""
smf = {
    'comp_cat': 'fiber',
    'comp_id': '',
    'fiber_type': 'SMF',
    'length': 100
}

oa = {
    'comp_cat': 'OA',
    'comp_id': '',
    'ref_freq': 193.5,
    'gain': 20,
    'gain_tlt': 0.0,
    'noise_figure': 5
}

pc = {
    'comp_cat': 'PC',
    'comp_id': '04',
    'ref_freq': 193.,
    'loss': 2.0,
    'loss_tlt': 0.0
}

link = []

for index in range(20):
    smf['comp_id'] = '%03d' % (2 * index)
    oa['comp_id'] = '%03d' % (2 * index + 1)
    link += [dict(smf)]
    link += [dict(oa)]

pc['comp_id'] = '%03d' % 40
link += [dict(pc)]
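The resulting link is a flat list of component dictionaries keyed by comp_cat, which change_component_ref in gnpy/gnpy.py walks in the same way. A minimal sketch of consuming the structure, assuming the module paths added by this commit; the summarize helper is illustrative only:

from gnpy.configuration.fiber_parameters import fibers
from gnpy.configuration.link_description import link

def summarize(link, fibers):
    # Illustrative: walk the link list and resolve each fiber span against fiber_parameters.
    total_length = 0.0
    for component in link:
        if component['comp_cat'] == 'fiber':
            params = fibers[component['fiber_type']]   # SMF / NZDF entry
            total_length += component['length']
            print(component['comp_id'], 'fiber', component['length'], 'km, alpha =', params['alpha'], 'dB/km')
        elif component['comp_cat'] == 'OA':
            print(component['comp_id'], 'amplifier, gain =', component['gain'], 'dB')
        else:  # 'PC'
            print(component['comp_id'], 'passive, loss =', component['loss'], 'dB')
    return total_length

print('total fiber length:', summarize(link, fibers), 'km')

Note that the sketch compares strings with ==; the `is` comparisons used by change_component_ref later in this diff only work because CPython happens to intern these short literals.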
7  gnpy/constants.py (new file)

@@ -0,0 +1,7 @@
# -*- coding: utf-8 -*-

from math import pi

pi = pi
c = 299792458  # Speed of light
h = 6.62606896e-34  # Planck's constant
@@ -7,13 +7,13 @@ import time
def main():

    # Accuracy parameters
    flag_analytic = False
    flag_analytic = True
    num_computed_values = 2
    interp_method = 'linear'
    threshold_fwm = 50
    n_points = 500
    n_points_min = 4
    accuracy_param = {'is_analytic': flag_analytic, 'n_not_interp': num_computed_values, 'kind_interp': interp_method,
    accuracy_param = {'is_analytic': flag_analytic, 'points_not_interp': num_computed_values, 'kind_interp': interp_method,
                      'th_fwm': threshold_fwm, 'n_points': n_points, 'n_points_min': n_points_min}

    # Parallelization Parameters

@@ -22,26 +22,36 @@ def main():
    # Spectrum parameters
    num_ch = 95
    rs = np.ones(num_ch) * 0.032
    b_ch = rs  # For root raised cosine shapes, the -3 dB band is equal to the symbol rate
    roll_off = np.ones(num_ch) * 0.05
    power = np.ones(num_ch) * 0.001
    central_freq = 193.5
    if num_ch % 2 == 1:  # odd number of channels
        fch = np.arange(-(num_ch // 2), (num_ch // 2) + 1, 1) * 0.05  # noqa: E501
    else:
        fch = (np.arange(0, num_ch) - (num_ch / 2) + 0.5) * 0.05
    spectrum_param = {'num_ch': num_ch, 'f_ch': fch, 'rs': rs, 'roll_off': roll_off, 'power': power}
        fch = (np.arange(0, num_ch) - (num_ch / 2.0) + 0.5) * 0.05
    spectrum_param = {'num_ch': num_ch, 'f_ch': fch, 'b_ch': b_ch, 'roll_off': roll_off, 'power': power}

    # Fiber Parameters
    beta2 = 21.27
    l_span = 100
    l_span = 100.0
    loss = 0.2
    gam = 1.27
    fiber_param = {'a_db': loss, 'span_length': l_span, 'beta2': beta2, 'gamma': gam}
    fiber_param = {'alpha': loss, 'span_length': l_span, 'beta_2': beta2, 'gamma': gam}

    # EDFA Parameters
    noise_fig = 5.5
    gain_zero = 25.0
    gain_tilting = 0.5

    # Compute the GN model
    t = time.time()
    nli_cmp, f_nli_cmp, nli_int, f_nli_int = gn.gn_model(spectrum_param, fiber_param, accuracy_param, n_cores)  # noqa: E501
    print('Elapsed: %s' % (time.time() - t))

    # Compute the EDFA profile
    gain, g_ase = gn.compute_edfa_profile(gain_zero, gain_tilting, noise_fig, central_freq, fch)

    # Compute the raised cosine comb
    f1_array = np.linspace(np.amin(fch), np.amax(fch), 1e3)
    gtx = gn.raised_cosine_comb(f1_array, rs, roll_off, fch, power)

@@ -52,6 +62,7 @@ def main():
    plt.plot(f1_array, 10 * np.log10(gtx), '-b', label='WDM comb')
    plt.plot(f_nli_cmp, 10 * np.log10(nli_cmp), 'ro', label='GNLI computed')
    plt.plot(f_nli_int, 10 * np.log10(nli_int), 'g+', label='GNLI interpolated')
    plt.plot(fch, 10 * np.log10(g_ase), 'yo', label='GASE')
    plt.ylabel('PSD [dB(W/THz)]')
    plt.xlabel('f [THz]')
    plt.legend(loc='upper left')
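Restating the channel-grid construction from the hunk above as equations (my transcription of the code, with \Delta f = 0.05 THz and N the number of channels):

f_k = \left(k - \lfloor N/2 \rfloor\right)\,\Delta f, \qquad k = 0,\dots,N-1, \quad N \text{ odd}

f_k = \left(k - N/2 + \tfrac{1}{2}\right)\,\Delta f, \qquad k = 0,\dots,N-1, \quad N \text{ even}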
314  gnpy/examples/architecture.py (new file)
@@ -0,0 +1,314 @@
|
||||
from networkx import DiGraph
|
||||
from networkx.algorithms import all_simple_paths
|
||||
from collections import namedtuple
|
||||
from scipy.spatial.distance import cdist
|
||||
from numpy import array
|
||||
from itertools import product, islice, tee, count
|
||||
from networkx import (draw_networkx_nodes,
|
||||
draw_networkx_edges,
|
||||
draw_networkx_labels,
|
||||
draw_networkx_edge_labels,
|
||||
spring_layout)
|
||||
from matplotlib.pyplot import show, figure
|
||||
from warnings import catch_warnings, simplefilter
|
||||
from argparse import ArgumentParser
|
||||
|
||||
from logging import getLogger
|
||||
logger = getLogger(__name__)
|
||||
|
||||
# remember me?
|
||||
nwise = lambda g, n=2: zip(*(islice(g, i, None)
|
||||
for i, g in enumerate(tee(g, n))))
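A small, standalone illustration of what the nwise helper above yields (same lambda, just exercised on toy inputs):

from itertools import islice, tee

# Same sliding-window helper as in architecture.py above.
nwise = lambda g, n=2: zip(*(islice(g, i, None) for i, g in enumerate(tee(g, n))))

print(list(nwise('abcd')))        # [('a', 'b'), ('b', 'c'), ('c', 'd')]
print(list(nwise(range(5), 3)))   # [(0, 1, 2), (1, 2, 3), (2, 3, 4)]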
|
||||
|
||||
# here's an example that includes a little
|
||||
# bit of complexity to help suss out details
|
||||
# of the proposed architecture
|
||||
|
||||
# let's pretend there's a working group whose
|
||||
# job is to minimise latency of a network
|
||||
# that working group gets the namespace LATENCY
|
||||
|
||||
# they interact with a working group whose
|
||||
# job is to capture physical details of a network
|
||||
# such as the geographic location of each node
|
||||
# and whether nodes are mobile or fixed
|
||||
# that working group gets the namespace PHYSICAL
|
||||
|
||||
# each working group can put any arbitrary Python
|
||||
# object as the data for their given namespace
|
||||
|
||||
# the PHYSICAL group captures the following details
|
||||
# of a NODE: - whether it is mobile or fixed
|
||||
# - its (x, y) position
|
||||
# of an EDGE: - the physical length of this connection
|
||||
# - the speed of transmission over this link
|
||||
|
||||
# NOTE: for this example, we will consider network
|
||||
# objects to be MUTABLE just to simplify
|
||||
# the code
|
||||
# if the graph object is immutable, then
|
||||
# computations/transformations would return copies
|
||||
# of the original graph. This can be done easily via
|
||||
# `G.copy()`, but you'll have to account for the
|
||||
# semantic difference between shallow-vs-deep copy
|
||||
|
||||
# NOTE: we'll put the Node & Edge information for these
|
||||
# two working groups inside classes just for the
|
||||
# purpose of namespacing & just so that we can
|
||||
# write all the code for this example
|
||||
# in a single .py file: normally these pieces
|
||||
# would be in separate modules so that you can
|
||||
# `from tpe.physical import Node, Edge`
|
||||
|
||||
|
||||
class Physical:
|
||||
# for Node: neither fixed nor position are computed fields
|
||||
# - fixed cannot be changed (immutable)
|
||||
# - position can be changed (mutable)
|
||||
class Node:
|
||||
def __init__(self, fixed, position):
|
||||
self._fixed = fixed
|
||||
self.position = position
|
||||
|
||||
@property
|
||||
def fixed(self):
|
||||
return self._fixed
|
||||
|
||||
@property
|
||||
def position(self):
|
||||
return self._position
|
||||
|
||||
@position.setter
|
||||
def position(self, value):
|
||||
if len(value) != 2:
|
||||
raise ValueError('position must be (x, y) value')
|
||||
self._position = value
|
||||
|
||||
def __repr__(self):
|
||||
return f'Node({self.fixed}, {self.position})'
|
||||
|
||||
# for Edge:
|
||||
# - speed (m/s) cannot be changed (immutable)
|
||||
# - distance is COMPUTED
|
||||
class Edge(namedtuple('Edge', 'speed endpoints')):
|
||||
def distance(self, graph):
|
||||
from_node, to_node = self.endpoints
|
||||
positions = [graph.node[from_node]['physical'].position], \
|
||||
[graph.node[to_node]['physical'].position]
|
||||
return cdist(*positions)[0][0]
|
||||
|
||||
# NOTE: in this above, the computed edge data
|
||||
# is computed on a per-edge basis
|
||||
# which forces loops into Python
|
||||
# however, the PHYSICAL working group has the
|
||||
# power to decide what their API looks like
|
||||
# and they could just as easily have provided
|
||||
# some top-level function as part of their API
|
||||
# to compute this "update" graph-wise
|
||||
@staticmethod
|
||||
def compute_distances(graph):
|
||||
# XXX: here you can see the general clumsiness of moving
|
||||
# in and out of the `numpy`/`scipy` computation "domain"
|
||||
# which exposes another potential flaw in our model
|
||||
# our model is very "naturalistic": we have Python objects
|
||||
# that match to real-world objects like routers (nodes)
|
||||
# and links (edges)
|
||||
# however, from a computational perspective, we may find
|
||||
# it more efficient to work within a mathematical
|
||||
# domain where our objects are simply (sparse) matrices of
|
||||
# graph data
|
||||
# moving between these "naturalistic" and "mathematical"
|
||||
# domains can be very clumsy
|
||||
# note that there's also clumsiness in that the "naturalistic"
|
||||
# modelling pushes data storage onto individual Python objects
|
||||
# such as the edge data dicts whereas the mathematical
|
||||
# modelling keeps the data in a single place (probably in
|
||||
# the graph data dict); moving data between the two is also clumsy
|
||||
data = {k: v['physical'].position for k, v in graph.node.items()}
|
||||
positions = array(list(data.values()))
|
||||
distances = cdist(positions, positions)
|
||||
|
||||
# we can either store the above information back onto the graph itself:
|
||||
## graph['physical'].distances = distances
|
||||
|
||||
# or back onto the edge data itself:
|
||||
# for (i, u), (j, v) in product(enumerate(data), enumerate(data)):
|
||||
# if (u, v) not in graph.edge:
|
||||
# continue
|
||||
## edge, redge = graph.edge[u][v], graph.edge[v][u]
|
||||
## dist = distances[i, j]
|
||||
## edge['physical'].computed_distance = dist
|
||||
|
||||
# as part of the latency group's API, they specify that:
|
||||
# - they consume PHYSICAL data
|
||||
# - they modify PHYSICAL data
|
||||
# - they do not add their own data
|
||||
|
||||
|
||||
class Latency:
|
||||
@staticmethod
|
||||
def latency(graph, u, v):
|
||||
paths = list(all_simple_paths(graph, u, v))
|
||||
data = [(graph.get_edge_data(a, b)['physical'].speed,
|
||||
graph.get_edge_data(a, b)['physical'].distance(graph))
|
||||
for path in paths
|
||||
for a, b in nwise(path)]
|
||||
return min(distance / speed for speed, distance in data)
|
||||
|
||||
@staticmethod
|
||||
def total_latency(graph):
|
||||
return sum(Latency.latency(graph, u, v) for u, v in graph.edges())
|
||||
|
||||
@staticmethod
|
||||
def nudge(u, v, precision=4):
|
||||
(ux, uy), (vx, vy) = u, v
|
||||
return (round(ux + (vx - ux) / 2, precision),
|
||||
round(uy + (vy - uy) / 2, precision),)
|
||||
|
||||
@staticmethod
|
||||
def gradient(graph, nodes):
|
||||
# independently move each mobile node in the direction of one
|
||||
# of its neighbors and compute the change in total_latency
|
||||
for u in nodes:
|
||||
for v in nodes[u]:
|
||||
upos, vpos = graph.node[u]['physical'].position, \
|
||||
graph.node[v]['physical'].position
|
||||
new_upos = Latency.nudge(upos, vpos)
|
||||
before = Latency.total_latency(graph)
|
||||
graph.node[u]['physical'].position = new_upos
|
||||
after = Latency.total_latency(graph)
|
||||
graph.node[u]['physical'].position = upos
|
||||
logger.info(
|
||||
f'Gradient {u} ⇋ {v}; u to {new_upos}; grad {after-before}')
|
||||
yield u, v, new_upos, after - before
|
||||
|
||||
# their public API may include only the following
|
||||
# function for minimizing latency over a network
|
||||
@staticmethod
|
||||
def minimize(graph, *, n=5, threshold=1e-5 * 1e-9, d=None):
|
||||
mobile = {k: list(graph.edge[k]) for k, v in graph.node.items()
|
||||
if not v['physical'].fixed}
|
||||
# XXX: VERY sloppy optimization: repeatedly compute gradients,
|
||||
# nudging nodes in the direction of the best latency improvement
|
||||
for it in count():
|
||||
gradients = u, v, pos, grad = min(Latency.gradient(graph, mobile),
|
||||
key=lambda rv: rv[-1])
|
||||
logger.info(f'Best gradient {u} ⇋ {v}; u to {pos}; grad {grad}')
|
||||
logger.info(
|
||||
f'Moving {u} in dir of {v} for {grad/1e-12:.2f} ps gain')
|
||||
graph.node[u]['physical'].position = pos
|
||||
if d:
|
||||
d.send((f'step #{it}', graph))
|
||||
if it > n or abs(grad) < threshold: # stop after N iterations
|
||||
break # or if improvement < threshold
|
||||
|
||||
# our Network object is just a networkx.DiGraph
|
||||
# with some additional storage for graph-level
|
||||
# data
|
||||
# NOTE: this may actually need to be a networkx.MultiDiGraph?
|
||||
# in the event that graphs may have multiple links
|
||||
# with distance edge data connecting them
|
||||
|
||||
|
||||
def Network(*args, data=None, **kwargs):
|
||||
n = DiGraph()
|
||||
n.data = {} if data is None else data
|
||||
return n
|
||||
|
||||
|
||||
def draw_changes():
|
||||
''' simple helper to draw changes to the network '''
|
||||
fig = figure()
|
||||
for n in count():
|
||||
data = yield
|
||||
if not data:
|
||||
break
|
||||
for i, ax in enumerate(fig.axes):
|
||||
ax.change_geometry(n + 1, 1, i + 1)
|
||||
ax = fig.add_subplot(n + 1, 1, n + 1)
|
||||
title, network, *edge_labels = data
|
||||
node_data = {u: (u, network.node[u]['physical'].position)
|
||||
for u in network.nodes()}
|
||||
edge_data = {(u, v): (network.get_edge_data(u, v)['physical'].distance(network),
|
||||
network.get_edge_data(u, v)['physical'].speed,)
|
||||
for u, v in network.edges()}
|
||||
labels = {u: f'{n}' for u, (n, p) in node_data.items()}
|
||||
distances = {(u, v): f'dist = {d:.2f} m\nspeed = {s/1e6:.2f}e6 m/s'
|
||||
for (u, v), (d, s) in edge_data.items()}
|
||||
|
||||
pos = {u: p for u, (_, p) in node_data.items()}
|
||||
label_pos = pos
|
||||
|
||||
draw_networkx_edges(network, alpha=.25, width=.5, pos=pos, ax=ax)
|
||||
draw_networkx_nodes(network, node_size=600, alpha=.5, pos=pos, ax=ax)
|
||||
draw_networkx_labels(network, labels=labels,
|
||||
pos=pos, label_pos=.3, ax=ax)
|
||||
if edge_labels:
|
||||
draw_networkx_edge_labels(
|
||||
network, edge_labels=distances, pos=pos, font_size=8, ax=ax)
|
||||
|
||||
ax.set_title(title)
|
||||
ax.set_axis_off()
|
||||
|
||||
with catch_warnings():
|
||||
simplefilter('ignore')
|
||||
show()
|
||||
yield
|
||||
|
||||
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('-v', action='count')
|
||||
|
||||
if __name__ == '__main__':
|
||||
from logging import basicConfig, INFO
|
||||
args = parser.parse_args()
|
||||
if args.v:
|
||||
basicConfig(level=INFO)
|
||||
|
||||
print('''
|
||||
Sample network has nodes:
|
||||
a ⇋ b ⇋ c ⇋ d
|
||||
|
||||
signals a ⇋ b travel at speed of light through copper
|
||||
signals b ⇋ c travel at speed of light through water
|
||||
signals c ⇋ d travel at speed of light through water
|
||||
|
||||
all connections are bidirectional
|
||||
|
||||
a, c, d are fixed position
|
||||
b is mobile
|
||||
|
||||
How can we move b to maximise speed of transmission a ⇋ d?
|
||||
''')
|
||||
|
||||
# create network
|
||||
n = Network()
|
||||
for name, fixed, (x, y) in [('a', True, (0, 0)),
|
||||
('b', False, (5, 5)),
|
||||
('c', True, (10, 10)),
|
||||
('d', True, (20, 20)), ]:
|
||||
n.add_node(name, physical=Physical.Node(fixed=fixed, position=(x, y)))
|
||||
for u, v, speed in [('a', 'b', 299790000),
|
||||
('b', 'c', 225000000),
|
||||
('c', 'd', 225000000), ]:
|
||||
n.add_edge(u, v, physical=Physical.Edge(speed=speed, endpoints=(u, v)))
|
||||
n.add_edge(v, u, physical=Physical.Edge(speed=speed, endpoints=(v, u)))
|
||||
|
||||
d = draw_changes()
|
||||
next(d)
|
||||
d.send(('initial', n, True))
|
||||
|
||||
# core computation
|
||||
latency = Latency.latency(n, 'a', 'd')
|
||||
total_latency = Latency.total_latency(n)
|
||||
Latency.minimize(n, d=d)
|
||||
total_latency = Latency.total_latency(n)
|
||||
|
||||
print('Before:')
|
||||
print(f' Current latency from a ⇋ d: {latency/1e-9:.2f} ns')
|
||||
print(f' Total latency on n: {total_latency/1e-9:.2f} ns')
|
||||
|
||||
print('After:')
|
||||
print(f' Total latency on n: {total_latency/1e-9:.2f} ns')
|
||||
|
||||
next(d)
|
||||
160  gnpy/examples/config/config_ex1.json (new file)
@@ -0,0 +1,160 @@
|
||||
{
|
||||
"elements": [{
|
||||
"id": "span0",
|
||||
"type": "fiber",
|
||||
"name": "ggg",
|
||||
"description": "Booster_Connection ffff",
|
||||
"parameters": {
|
||||
"length": 80.0,
|
||||
"dispersion": null,
|
||||
"dispersion_slope": 16.7,
|
||||
"pmd": 0.0,
|
||||
"loss": 0.2,
|
||||
"fiber_type": "SMF-28e",
|
||||
"nonlinear_coef": 0.0
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "span1",
|
||||
"type": "fiber",
|
||||
"name": "",
|
||||
"description": "Booster_Connection",
|
||||
"parameters": {
|
||||
"length": 100.0,
|
||||
"dispersion": null,
|
||||
"dispersion_slope": 16.7,
|
||||
"pmd": 0.0,
|
||||
"loss": 0.2,
|
||||
"fiber_type": "SMF-28e",
|
||||
"nonlinear_coef": 0.0
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "span2",
|
||||
"type": "fiber",
|
||||
"name": "",
|
||||
"description": "Booster_Connection",
|
||||
"parameters": {
|
||||
"length": 80.0,
|
||||
"dispersion": null,
|
||||
"dispersion_slope": 16.7,
|
||||
"pmd": 0.0,
|
||||
"loss": 0.2,
|
||||
"fiber_type": "SMF-28e",
|
||||
"nonlinear_coef": 0.0
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "amp0",
|
||||
"name": "Booster",
|
||||
"description": "This is the booster amp",
|
||||
"type": "edfa",
|
||||
"parameters": {
|
||||
"manufacturer": "acme corp",
|
||||
"manufacturer_pn": "acme model1",
|
||||
"manufacturer_gain_profile": "?",
|
||||
"frequencies": [193.95],
|
||||
"gain": [15.0],
|
||||
"nf": [8],
|
||||
"input_voa": 0.0,
|
||||
"output_voa": 14.0,
|
||||
"pin": [-10],
|
||||
"ptarget": [0],
|
||||
"pmax": 23.0
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "amp1",
|
||||
"name": "line",
|
||||
"description": "This is the line amp",
|
||||
"type": "edfa",
|
||||
"parameters": {
|
||||
"manufacturer": "acme corp",
|
||||
"manufacturer_pn": "acme model2",
|
||||
"manufacturer_gain_profile": null,
|
||||
"frequencies": [193.95],
|
||||
"gain": [24.0],
|
||||
"nf": [5.5],
|
||||
"input_voa": 0.0,
|
||||
"output_voa": 4.0,
|
||||
"pin": [-20],
|
||||
"ptarget": [0],
|
||||
"pmax": 23.0
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "amp2",
|
||||
"name": "PreAmp",
|
||||
"description": "This is the preamp",
|
||||
"type": "edfa",
|
||||
"parameters": {
|
||||
"manufacturer": "acme corp",
|
||||
"manufacturer_pn": "acme model2",
|
||||
"manufacturer_gain_profile": null,
|
||||
"frequencies": [193.95],
|
||||
"gain": [24.0],
|
||||
"nf": [5.5],
|
||||
"nf_vs_gain": [[24, 5.5], [25, 5.6]],
|
||||
"input_voa": 0.0,
|
||||
"output_voa": 4.0,
|
||||
"pin": [-20],
|
||||
"ptarget": [0],
|
||||
"pmax": 23.0
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "tx",
|
||||
"name": "tx1",
|
||||
"id": "tx1",
|
||||
"description": "transmitter 1",
|
||||
"parameters": {
|
||||
"channels": [{
|
||||
"manufacturer": "acme corp",
|
||||
"manufacturer_pn": "acme model1",
|
||||
"frequency": 193.95,
|
||||
"modulation": "QPSK",
|
||||
"baud_rate": 32.0,
|
||||
"capacity": 100,
|
||||
"psd": "acme_model1_mode_1",
|
||||
"dispersion_precomp": 0,
|
||||
"fecOH": 25.0,
|
||||
"filter": "rrc",
|
||||
"filter_params": [0.4],
|
||||
"polarization_mux": "interleaved",
|
||||
"osnr": 40.0,
|
||||
"power": -10.0
|
||||
},
|
||||
{
|
||||
"manufacturer": "acme corp",
|
||||
"manufacturer_pn": "acme model1",
|
||||
"frequency": 194.15,
|
||||
"modulation": "QPSK",
|
||||
"baud_rate": 32.0,
|
||||
"capacity": 100,
|
||||
"psd": "acme_model1_mode_1",
|
||||
"dispersion_precomp": 0,
|
||||
"fecOH": 25.0,
|
||||
"filter": "rrc",
|
||||
"filter_params": [0.4],
|
||||
"polarization_mux": "interleaved",
|
||||
"osnr": 40.0,
|
||||
"power": -5.0
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "rx",
|
||||
"name": "rx1",
|
||||
"id": "rx1",
|
||||
"description": "receiver 1",
|
||||
"parameters":{
|
||||
"sensitivity": -7
|
||||
}
|
||||
}
|
||||
],
|
||||
"topology": [
|
||||
["tx1", "amp0", "span0", "amp1", "span1", "amp2", "rx1"],
|
||||
["tx1", "span2", "rx1"]
|
||||
]
|
||||
}
|
||||
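The configuration above keeps components in elements (keyed by id) and routes in topology (ordered lists of ids). A minimal sketch of inspecting it with nothing but the standard library, assuming the file path shown in this diff:

import json

# Sketch: inspect the example configuration shipped in this commit.
with open('gnpy/examples/config/config_ex1.json') as f:
    config = json.load(f)

elements = {e['id']: e for e in config['elements']}   # index components by id

for path in config['topology']:                       # each path is an ordered list of ids
    kinds = [elements[node_id]['type'] for node_id in path]
    print(' -> '.join('%s(%s)' % (i, k) for i, k in zip(path, kinds)))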
36  gnpy/examples/sim_ex.py (new file)
@@ -0,0 +1,36 @@
|
||||
from gnpy import Network
|
||||
from gnpy.utils import read_config
|
||||
from os.path import realpath, join, dirname
|
||||
|
||||
if __name__ == '__main__':
|
||||
basedir = dirname(realpath(__file__))
|
||||
filename = join(basedir, 'config/config_ex1.json')
|
||||
config = read_config(filename)
|
||||
nw = Network(config)
|
||||
nw.propagate_all_paths()
|
||||
|
||||
# output OSNR propagation
|
||||
for path in nw.tr_paths:
|
||||
print(' → '.join(x.id for x in path.path))
|
||||
for u, v in path.edge_list:
|
||||
channels = nw.g[u][v]['channels']
|
||||
channel_info = ('\n' + ' ' * 24).join(
|
||||
' '.join([f'freq: {x["frequency"]:7.2f}',
|
||||
f'osnr: {x["osnr"]:7.2f}',
|
||||
f'power: {x["power"]:7.2f}'])
|
||||
for x in channels)
|
||||
print(f'{u.id:^10s} → {v.id:^10s} {channel_info}')
|
||||
|
||||
if 1: # plot network graph
|
||||
import networkx as nx
|
||||
import matplotlib.pyplot as plt
|
||||
layout = nx.spring_layout(nw.g)
|
||||
nx.draw_networkx_nodes(nw.g, layout, node_size=1000,
|
||||
node_color='b', alpha=0.2, node_shape='s')
|
||||
nx.draw_networkx_labels(nw.g, layout)
|
||||
nx.draw_networkx_edges(nw.g, layout, width=2,
|
||||
alpha=0.3, edge_color='green')
|
||||
nx.draw_networkx_edge_labels(nw.g, layout, font_size=10)
|
||||
plt.rcdefaults()
|
||||
plt.axis('off')
|
||||
plt.show()
|
||||
604  gnpy/gnpy.py
@@ -30,9 +30,9 @@ def raised_cosine_comb(f, rs, roll_off, center_freq, power):
|
||||
:param power: Array of channel powers in W. One per channel
|
||||
:return: PSD of the WDM comb evaluated over f
|
||||
"""
|
||||
ts_arr = 1 / rs
|
||||
passband_arr = (1 - roll_off) / (2 * ts_arr)
|
||||
stopband_arr = (1 + roll_off) / (2 * ts_arr)
|
||||
ts_arr = 1.0 / rs
|
||||
passband_arr = (1.0 - roll_off) / (2.0 * ts_arr)
|
||||
stopband_arr = (1.0 + roll_off) / (2.0 * ts_arr)
|
||||
g = power / rs
|
||||
psd = np.zeros(np.shape(f))
|
||||
for ind in range(np.size(center_freq)):
|
||||
@@ -46,7 +46,7 @@ def raised_cosine_comb(f, rs, roll_off, center_freq, power):
|
||||
if roll_off[ind] == 0:
|
||||
psd = np.where(tf <= 0, g_ch, 0.) + psd
|
||||
else:
|
||||
psd = g_ch * (np.where(tf <= 0, 1., 0.) + 1 / 2 * (1 + np.cos(np.pi * ts / roll_off[ind] *
|
||||
psd = g_ch * (np.where(tf <= 0, 1., 0.) + 1.0 / 2.0 * (1 + np.cos(np.pi * ts / roll_off[ind] *
|
||||
tf)) * np.where(tf > 0, 1., 0.) *
|
||||
np.where(np.abs(ff) <= stopband, 1., 0.)) + psd
|
||||
|
||||
@@ -62,8 +62,8 @@ def fwm_eff(a, Lspan, b2, ff):
|
||||
:param ff: Array of Frequency points in THz
|
||||
:return: FWM efficiency rho
|
||||
"""
|
||||
rho = np.power(np.abs((1 - np.exp(-2 * a * Lspan + 1j * 4 * np.pi * np.pi * b2 * Lspan * ff)) / (
|
||||
2 * a - 1j * 4 * np.pi * np.pi * b2 * ff)), 2)
|
||||
rho = np.power(np.abs((1.0 - np.exp(-2.0 * a * Lspan + 1j * 4.0 * np.pi * np.pi * b2 * Lspan * ff)) / (
|
||||
2.0 * a - 1j * 4.0 * np.pi * np.pi * b2 * ff)), 2)
|
||||
return rho
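For reference, the expression fwm_eff evaluates, transcribed from the code above (\alpha is the linear field loss in 1/km, L the span length, \beta_2 the dispersion, and f stands for the product (f_1 - f)(f_2 - f) passed by the caller):

\rho(f) = \left| \frac{1 - e^{-2\alpha L + j\,4\pi^{2}\beta_{2} L f}}{2\alpha - j\,4\pi^{2}\beta_{2} f} \right|^{2}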
|
||||
|
||||
|
||||
@@ -81,27 +81,27 @@ def get_freqarray(f, Bopt, fmax, max_step, f_dense_low, f_dense_up, df_dense):
|
||||
:return: Non uniformly defined frequency array
|
||||
"""
|
||||
f_dense = np.arange(f_dense_low, f_dense_up, df_dense)
|
||||
k = Bopt / 2 / (Bopt / 2 - max_step) # Compute Step ratio for log-spaced array definition
|
||||
k = Bopt / 2.0 / (Bopt / 2.0 - max_step) # Compute Step ratio for log-spaced array definition
|
||||
if f < 0:
|
||||
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_low)) / np.log(k) + 1)
|
||||
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_low)) / np.log(k) + 1.0)
|
||||
f1_short = -(np.abs(f_dense_low) * np.power(k, np.arange(Nlog_short, 0.0, -1.0) - 1.0))
|
||||
k = (Bopt / 2 + (np.abs(f_dense_up) - f_dense_low)) / (Bopt / 2 - max_step + (np.abs(f_dense_up) - f_dense_up))
|
||||
Nlog_long = np.ceil(np.log((fmax + (np.abs(f_dense_up) - f_dense_up)) / abs(f_dense_up)) * 1 / np.log(k) + 1)
|
||||
f1_long = np.abs(f_dense_up) * np.power(k, (np.arange(1, Nlog_long + 1) - 1)) - (
|
||||
k = (Bopt / 2 + (np.abs(f_dense_up) - f_dense_low)) / (Bopt / 2.0 - max_step + (np.abs(f_dense_up) - f_dense_up))
|
||||
Nlog_long = np.ceil(np.log((fmax + (np.abs(f_dense_up) - f_dense_up)) / abs(f_dense_up)) * 1.0 / np.log(k) + 1.0)
|
||||
f1_long = np.abs(f_dense_up) * np.power(k, (np.arange(1, Nlog_long + 1) - 1.0)) - (
|
||||
np.abs(f_dense_up) - f_dense_up)
|
||||
f1_array = np.concatenate([f1_short, f_dense[1:], f1_long])
|
||||
else:
|
||||
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_up)) / np.log(k) + 1)
|
||||
f1_short = f_dense_up * np.power(k, np.arange(1, Nlog_short + 1) - 1)
|
||||
k = (Bopt / 2 + (abs(f_dense_low) + f_dense_low)) / (Bopt / 2 - max_step + (abs(f_dense_low) + f_dense_low))
|
||||
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_up)) / np.log(k) + 1.0)
|
||||
f1_short = f_dense_up * np.power(k, np.arange(1, Nlog_short + 1.0) - 1.0)
|
||||
k = (Bopt / 2.0 + (abs(f_dense_low) + f_dense_low)) / (Bopt / 2.0 - max_step + (abs(f_dense_low) + f_dense_low))
|
||||
Nlog_long = np.ceil(np.log((fmax + (np.abs(f_dense_low) + f_dense_low)) / np.abs(f_dense_low)) / np.log(k) + 1)
|
||||
f1_long = -(np.abs(f_dense_low) * np.power(k, np.arange(Nlog_long, 0, -1) - 1)) + (
|
||||
f1_long = -(np.abs(f_dense_low) * np.power(k, np.arange(Nlog_long, 0, -1) - 1.0)) + (
|
||||
abs(f_dense_low) + f_dense_low)
|
||||
f1_array = np.concatenate([f1_long, f_dense[1:], f1_short])
|
||||
return f1_array
|
||||
|
||||
|
||||
def GN_integral(b2, Lspan, a_db, gam, f_ch, rs, roll_off, power, Nch, model_param):
|
||||
def GN_integral(b2, Lspan, a_db, gam, f_ch, b_ch, roll_off, power, Nch, model_param):
|
||||
""" GN_integral computes the GN reference formula via smart brute force integration. The Gaussian Noise model is
|
||||
applied in its incoherent form (phased-array factor =1). The function computes the integral by columns: for each f1,
|
||||
a non-uniformly spaced f2 array is generated, and the integrand function is computed there. At the end of the loop
|
||||
@@ -112,7 +112,7 @@ def GN_integral(b2, Lspan, a_db, gam, f_ch, rs, roll_off, power, Nch, model_para
|
||||
:param a_db: Fiber loss coefficient in dB/km. Scalar
|
||||
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param rs: Channels' Symbol Rates in TBaud. Array of size 1xNch
|
||||
:param b_ch: Channels' -3 dB bandwidth. Array of size 1xNch
|
||||
:param roll_off: Channels' Roll-off factors [0,1). Array of size 1xNch
|
||||
:param power: Channels' power values in W. Array of size 1xNch
|
||||
:param Nch: Number of channels. Scalar
|
||||
@@ -131,13 +131,13 @@ def GN_integral(b2, Lspan, a_db, gam, f_ch, rs, roll_off, power, Nch, model_para
|
||||
n_grid = model_param['n_grid']
|
||||
n_grid_min = model_param['n_grid_min']
|
||||
f_array = model_param['f_array']
|
||||
fmax = (f_ch[-1] - (rs[-1] / 2)) - (f_ch[0] - (rs[0] / 2)) # Get frequency limit
|
||||
fmax = (f_ch[-1] - (b_ch[-1] / 2.0)) - (f_ch[0] - (b_ch[0] / 2.0)) # Get frequency limit
|
||||
f2eval = np.max(np.diff(f_ch))
|
||||
Bopt = f2eval * Nch # Overall optical bandwidth [THz]
|
||||
min_step = f2eval / n_grid # Minimum integration step
|
||||
max_step = f2eval / n_grid_min # Maximum integration step
|
||||
f_dense_start = np.abs(
|
||||
np.sqrt(np.power(alpha_lin, 2) / (4 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1)) / f2eval)
|
||||
np.sqrt(np.power(alpha_lin, 2) / (4.0 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1.0)) / f2eval)
|
||||
f_ind_eval = 0
|
||||
GNLI = np.full(f_array.size, np.nan) # Pre-allocate results
|
||||
for f in f_array: # Loop over f
|
||||
@@ -156,12 +156,12 @@ def GN_integral(b2, Lspan, a_db, gam, f_ch, rs, roll_off, power, Nch, model_para
|
||||
df = f_dense_width / n_grid_dense
|
||||
# Get non-uniformly spaced f1 array
|
||||
f1_array = get_freqarray(f, Bopt, fmax, max_step, f_dense_low, f_dense_up, df)
|
||||
G1 = raised_cosine_comb(f1_array, rs, roll_off, f_ch, power) # Get corresponding spectrum
|
||||
G1 = raised_cosine_comb(f1_array, b_ch, roll_off, f_ch, power) # Get corresponding spectrum
|
||||
Gpart = np.zeros(f1_array.size) # Pre-allocate partial result for inner integral
|
||||
f_ind = 0
|
||||
for f1 in f1_array: # Loop over f1
|
||||
if f1 != f:
|
||||
f_lim = np.sqrt(np.power(alpha_lin, 2) / (4 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1)) / (
|
||||
f_lim = np.sqrt(np.power(alpha_lin, 2) / (4.0 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1.0)) / (
|
||||
f1 - f) + f
|
||||
f2_dense_up = np.maximum(f_lim, -f_lim)
|
||||
f2_dense_low = np.minimum(f_lim, -f_lim)
|
||||
@@ -183,21 +183,21 @@ def GN_integral(b2, Lspan, a_db, gam, f_ch, rs, roll_off, power, Nch, model_para
|
||||
f2_array = get_freqarray(f, Bopt, fmax, max_step, f2_dense_low, f2_dense_up, df2)
|
||||
f2_array = f2_array[f2_array >= f1] # Do not consider points below the bisector of quadrants I and III
|
||||
if f2_array.size > 0:
|
||||
G2 = raised_cosine_comb(f2_array, rs, roll_off, f_ch, power) # Get spectrum there
|
||||
G2 = raised_cosine_comb(f2_array, b_ch, roll_off, f_ch, power) # Get spectrum there
|
||||
f3_array = f1 + f2_array - f # Compute f3
|
||||
G3 = raised_cosine_comb(f3_array, rs, roll_off, f_ch, power) # Get spectrum over f3
|
||||
G3 = raised_cosine_comb(f3_array, b_ch, roll_off, f_ch, power) # Get spectrum over f3
|
||||
G = G2 * G3 * G1[f_ind]
|
||||
if np.count_nonzero(G):
|
||||
FWM_eff = fwm_eff(alpha_lin, Lspan, b2, (f1 - f) * (f2_array - f)) # Compute FWM efficiency
|
||||
Gpart[f_ind] = 2 * np.trapz(FWM_eff * G, f2_array) # Compute inner integral
|
||||
Gpart[f_ind] = 2.0 * np.trapz(FWM_eff * G, f2_array) # Compute inner integral
|
||||
f_ind += 1
|
||||
# Compute outer integral. Nominal span loss already compensated
|
||||
GNLI[f_ind_eval] = 16 / 27 * gam * gam * np.trapz(Gpart, f1_array)
|
||||
GNLI[f_ind_eval] = 16.0 / 27.0 * gam * gam * np.trapz(Gpart, f1_array)
|
||||
f_ind_eval += 1 # Next frequency
|
||||
return GNLI # Return GNLI array in W/THz and the array of the corresponding frequencies
|
||||
|
||||
|
||||
def compute_psi(b2, l_eff_a, f_ch, channel_index, interfering_index, rs):
|
||||
def compute_psi(b2, l_eff_a, f_ch, channel_index, interfering_index, b_ch):
|
||||
""" compute_psi computes the psi coefficient of the analytical formula.
|
||||
|
||||
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
@@ -205,27 +205,27 @@ def compute_psi(b2, l_eff_a, f_ch, channel_index, interfering_index, rs):
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param channel_index: Index of the channel. Scalar
|
||||
:param interfering_index: Index of the interfering signal. Scalar
|
||||
:param rs: Channels' Symbol Rates in TBaud. Array of size 1xNch
|
||||
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
|
||||
:return: psi: the coefficient
|
||||
"""
|
||||
b2 = np.abs(b2)
|
||||
|
||||
if channel_index == interfering_index: # The signal interfere with itself
|
||||
rs_sig = rs[channel_index]
|
||||
psi = np.arcsinh(0.5 * np.pi ** 2 * l_eff_a * b2 * rs_sig ** 2)
|
||||
if channel_index == interfering_index: # The signal interferes with itself
|
||||
b_ch_sig = b_ch[channel_index]
|
||||
psi = np.arcsinh(0.5 * np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig ** 2.0)
|
||||
else:
|
||||
f_sig = f_ch[channel_index]
|
||||
rs_sig = rs[channel_index]
|
||||
b_ch_sig = b_ch[channel_index]
|
||||
f_int = f_ch[interfering_index]
|
||||
rs_int = rs[interfering_index]
|
||||
b_ch_int = b_ch[interfering_index]
|
||||
del_f = f_sig - f_int
|
||||
psi = np.arcsinh(np.pi ** 2 * l_eff_a * b2 * rs_sig * (del_f + 0.5 * rs_int))
|
||||
psi -= np.arcsinh(np.pi ** 2 * l_eff_a * b2 * rs_sig * (del_f - 0.5 * rs_int))
|
||||
psi = np.arcsinh(np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig * (del_f + 0.5 * b_ch_int))
|
||||
psi -= np.arcsinh(np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig * (del_f - 0.5 * b_ch_int))
|
||||
|
||||
return psi
|
||||
|
||||
|
||||
def analytic_formula(ind, b2, l_eff, l_eff_a, gam, f_ch, g_ch, rs, n_ch):
|
||||
def analytic_formula(ind, b2, l_eff, l_eff_a, gam, f_ch, g_ch, b_ch, n_ch):
|
||||
""" analytic_formula computes the analytical formula.
|
||||
|
||||
:param ind: index of the channel at which g_nli is computed. Scalar
|
||||
@@ -235,43 +235,43 @@ def analytic_formula(ind, b2, l_eff, l_eff_a, gam, f_ch, g_ch, rs, n_ch):
|
||||
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param g_ch: Power spectral density W/THz. Array of size 1xNch
|
||||
:param rs: Channels' Symbol Rates in TBaud. Array of size 1xNch
|
||||
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
|
||||
:param n_ch: Number of channels. Scalar
|
||||
:return: g_nli: power spectral density in W/THz of the nonlinear interference
|
||||
"""
|
||||
ch_psd = g_ch[ind]
|
||||
b2 = abs(b2)
|
||||
|
||||
g_nli = 0
|
||||
g_nli = 0.0
|
||||
for n in np.arange(0, n_ch):
|
||||
psi = compute_psi(b2, l_eff_a, f_ch, ind, n, rs)
|
||||
g_nli += g_ch[n] * ch_psd ** 2 * psi
|
||||
psi = compute_psi(b2, l_eff_a, f_ch, ind, n, b_ch)
|
||||
g_nli += g_ch[n] * ch_psd ** 2.0 * psi
|
||||
|
||||
g_nli *= (16 / 27) * (gam * l_eff) ** 2 / (2 * np.pi * b2 * l_eff_a)
|
||||
g_nli *= (16.0 / 27.0) * (gam * l_eff) ** 2.0 / (2.0 * np.pi * b2 * l_eff_a)
|
||||
|
||||
return g_nli
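Taken together, compute_psi and analytic_formula implement the incoherent analytic GN expression; transcribed from the code in this hunk (B_i are the -3 dB bandwidths b_ch, G_i the channel PSDs g_ch, \Delta f_{i,n} = f_i - f_n):

\psi_{i,i} = \operatorname{arcsinh}\!\left(\tfrac{1}{2}\pi^{2} L_{\mathrm{eff},a}\,|\beta_2|\,B_i^{2}\right)

\psi_{i,n} = \operatorname{arcsinh}\!\left(\pi^{2} L_{\mathrm{eff},a}\,|\beta_2|\,B_i\,(\Delta f_{i,n} + \tfrac{1}{2}B_n)\right) - \operatorname{arcsinh}\!\left(\pi^{2} L_{\mathrm{eff},a}\,|\beta_2|\,B_i\,(\Delta f_{i,n} - \tfrac{1}{2}B_n)\right)

G_{\mathrm{NLI}}(f_i) = \frac{16}{27}\,\frac{(\gamma L_{\mathrm{eff}})^{2}}{2\pi |\beta_2| L_{\mathrm{eff},a}}\; G_i^{2} \sum_{n} G_n\,\psi_{i,n}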
|
||||
|
||||
|
||||
def gn_analytic(b2, l_span, a_db, gam, f_ch, rs, power, n_ch):
|
||||
def gn_analytic(b2, l_span, a_db, gam, f_ch, b_ch, power, n_ch):
|
||||
""" gn_analytic computes the GN reference formula via analytical solution.
|
||||
|
||||
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
:param l_span: Fiber Span length in km. Scalar
|
||||
:param a_db: Fiber loss coeffiecient in dB/km. Scalar
|
||||
:param a_db: Fiber loss coefficient in dB/km. Scalar
|
||||
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
|
||||
:param rs: Channels' Symbol Rates in TBaud. Array of size 1xNch
|
||||
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
|
||||
:param power: Channels' power values in W. Array of size 1xNch
|
||||
:param n_ch: Number of channels. Scalar
|
||||
:return: g_nli: power spectral density in W/THz of the nonlinear interference at frequencies model_param['f_array']
|
||||
"""
|
||||
g_ch = power / rs
|
||||
g_ch = power / b_ch
|
||||
alpha_lin = a_db / 20.0 / np.log10(np.e) # Conversion in linear units 1/km
|
||||
l_eff = (1 - np.exp(-2 * alpha_lin * l_span)) / (2 * alpha_lin) # Effective length
|
||||
l_eff_a = 1 / (2 * alpha_lin) # Asymptotic effective length
|
||||
l_eff = (1.0 - np.exp(-2.0 * alpha_lin * l_span)) / (2.0 * alpha_lin) # Effective length
|
||||
l_eff_a = 1.0 / (2.0 * alpha_lin) # Asymptotic effective length
|
||||
g_nli = np.zeros(f_ch.size)
|
||||
for ind in np.arange(0, f_ch.size):
|
||||
g_nli[ind] = analytic_formula(ind, b2, l_eff, l_eff_a, gam, f_ch, g_ch, rs, n_ch)
|
||||
g_nli[ind] = analytic_formula(ind, b2, l_eff, l_eff_a, gam, f_ch, g_ch, b_ch, n_ch)
|
||||
|
||||
return g_nli
|
||||
|
||||
@@ -289,9 +289,10 @@ def get_f_computed_interp(f_ch, n_not_interp):
|
||||
n_not_interp = num_ch
|
||||
|
||||
# Compute f_nli_comp
|
||||
n_not_interp_left = np.ceil((n_not_interp - 1) / 2)
|
||||
n_not_interp_right = np.floor((n_not_interp - 1) / 2)
|
||||
n_not_interp_left = np.ceil((n_not_interp - 1.0) / 2.0)
|
||||
n_not_interp_right = np.floor((n_not_interp - 1.0) / 2.0)
|
||||
central_index = len(f_ch) // 2
|
||||
print(central_index)
|
||||
|
||||
f_nli_central = np.array([f_ch[central_index]], copy=True)
|
||||
|
||||
@@ -349,19 +350,19 @@ def gn_model(spectrum_param, fiber_param, accuracy_param, n_cores):
|
||||
:param spectrum_param: Dictionary with spectrum parameters
|
||||
spectrum_param['num_ch']: Number of channels. Scalar
|
||||
spectrum_param['f_ch']: Baseband channels center frequencies in THz. Array of size 1xnum_ch
|
||||
spectrum_param['rs']: Channels' Symbol Rates in TBaud. Array of size 1xnum_ch
|
||||
spectrum_param['b_ch']: Channels' -3 dB band [THz]. Array of size 1xnum_ch
|
||||
spectrum_param['roll_off']: Channels' Roll-off factors [0,1). Array of size 1xnum_ch
|
||||
spectrum_param['power']: Channels' power values in W. Array of size 1xnum_ch
|
||||
:param fiber_param: Dictionary with the parameters of the fiber
|
||||
fiber_param['a_db']: Fiber loss coeffiecient in dB/km. Scalar
|
||||
fiber_param['alpha']: Fiber loss coefficient in dB/km. Scalar
|
||||
fiber_param['span_length']: Fiber Span length in km. Scalar
|
||||
fiber_param['beta2']: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
fiber_param['beta_2']: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
fiber_param['gamma']: Fiber nonlinear coefficient in 1/W/km. Scalar
|
||||
:param accuracy_param: Dictionary with model parameters for accuracy tuning
|
||||
accuracy_param['is_analytic']: A boolean indicating if you want to compute the NLI through
|
||||
the analytic formula (is_analytic = True) or the smart brute force integration (is_analytic =
|
||||
False). Boolean
|
||||
accuracy_param['n_not_interp']: The number of NLI which will be calculated. Others are
|
||||
accuracy_param['points_not_interp']: The number of NLI points which will be calculated; the others are
|
||||
interpolated
|
||||
accuracy_param['kind_interp']: The kind of interpolation using the function
|
||||
scipy.interpolate.interp1d
|
||||
@@ -380,19 +381,19 @@ def gn_model(spectrum_param, fiber_param, accuracy_param, n_cores):
|
||||
# Take signal parameters
|
||||
num_ch = spectrum_param['num_ch']
|
||||
f_ch = spectrum_param['f_ch']
|
||||
rs = spectrum_param['rs']
|
||||
b_ch = spectrum_param['b_ch']
|
||||
roll_off = spectrum_param['roll_off']
|
||||
power = spectrum_param['power']
|
||||
|
||||
# Take fiber parameters
|
||||
a_db = fiber_param['a_db']
|
||||
a_db = fiber_param['alpha']
|
||||
l_span = fiber_param['span_length']
|
||||
beta2 = fiber_param['beta2']
|
||||
beta2 = fiber_param['beta_2']
|
||||
gam = fiber_param['gamma']
|
||||
|
||||
# Take accuracy parameters
|
||||
is_analytic = accuracy_param['is_analytic']
|
||||
n_not_interp = accuracy_param['n_not_interp']
|
||||
n_not_interp = accuracy_param['points_not_interp']
|
||||
kind_interp = accuracy_param['kind_interp']
|
||||
th_fwm = accuracy_param['th_fwm']
|
||||
n_points = accuracy_param['n_points']
|
||||
@@ -400,7 +401,7 @@ def gn_model(spectrum_param, fiber_param, accuracy_param, n_cores):
|
||||
|
||||
# Computing NLI
|
||||
if is_analytic: # Analytic solution
|
||||
g_nli_comp = gn_analytic(beta2, l_span, a_db, gam, f_ch, rs, power, num_ch)
|
||||
g_nli_comp = gn_analytic(beta2, l_span, a_db, gam, f_ch, b_ch, power, num_ch)
|
||||
f_nli_comp = np.copy(f_ch)
|
||||
g_nli_interp = []
|
||||
f_nli_interp = []
|
||||
@@ -410,9 +411,494 @@ def gn_model(spectrum_param, fiber_param, accuracy_param, n_cores):
|
||||
model_param = {'min_FWM_inv': th_fwm, 'n_grid': n_points, 'n_grid_min': n_points_min,
|
||||
'f_array': np.array(f_nli_comp, copy=True)}
|
||||
|
||||
g_nli_comp = GN_integral(beta2, l_span, a_db, gam, f_ch, rs, roll_off, power, num_ch, model_param)
|
||||
g_nli_comp = GN_integral(beta2, l_span, a_db, gam, f_ch, b_ch, roll_off, power, num_ch, model_param)
|
||||
|
||||
# Interpolation
|
||||
g_nli_interp = interpolate_in_range(f_nli_comp, g_nli_comp, f_nli_interp, kind_interp)
|
||||
|
||||
a_zero = fiber_param['alpha'] * fiber_param['span_length']
|
||||
a_tilting = fiber_param['alpha_1st'] * fiber_param['span_length']
|
||||
|
||||
attenuation_db_comp = compute_attenuation_profile(a_zero, a_tilting, f_nli_comp)
|
||||
attenuation_lin_comp = 10 ** (-abs(attenuation_db_comp) / 10)
|
||||
|
||||
g_nli_comp *= attenuation_lin_comp
|
||||
|
||||
attenuation_db_interp = compute_attenuation_profile(a_zero, a_tilting, f_nli_interp)
|
||||
attenuation_lin_interp = 10 ** (-np.abs(attenuation_db_interp) / 10)
|
||||
|
||||
g_nli_interp *= attenuation_lin_interp
|
||||
|
||||
return g_nli_comp, f_nli_comp, g_nli_interp, f_nli_interp
|
||||
|
||||
|
||||
def compute_gain_profile(gain_zero, gain_tilting, freq):
|
||||
""" compute_gain_profile evaluates the gain at the frequencies freq.
|
||||
|
||||
:param gain_zero: the gain at f=0 in dB. Scalar
|
||||
:param gain_tilting: the gain tilt in dB/THz. Scalar
|
||||
:param freq: the baseband frequencies at which the gain profile is computed in THz. Array
|
||||
:return: gain: the gain profile in dB
|
||||
"""
|
||||
gain = gain_zero + gain_tilting * freq
|
||||
return gain
|
||||
|
||||
|
||||
def compute_ase_noise(noise_fig, gain, central_freq, freq):
|
||||
""" compute_ase_noise evaluates the ASE spectral density at the frequencies freq.
|
||||
|
||||
:param noise_fig: the amplifier noise figure in dB. Scalar
|
||||
:param gain: the gain profile in dB at the frequencies contained in freq array. Array
|
||||
:param central_freq: the central frequency of the WDM comb. Scalar
|
||||
:param freq: the baseband frequencies at which the ASE noise is computed in THz. Array
|
||||
:return: g_ase: the ase noise profile
|
||||
"""
|
||||
# the Planck constant in W/THz^2
|
||||
planck = (6.62607004 * 1e-34) * 1e24
|
||||
|
||||
# Conversion from dB to linear
|
||||
gain_lin = np.power(10, gain / 10.0)
|
||||
noise_fig_lin = np.power(10, noise_fig / 10.0)
|
||||
|
||||
g_ase = (gain_lin - 1) * noise_fig_lin * planck * (central_freq + freq)
|
||||
return g_ase
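The ASE spectral density used by compute_ase_noise above, written out (transcribed from the code; G and F are the linear gain and noise figure, h the Planck constant in W/THz^2, f_c + \Delta f the absolute channel frequency in THz):

G_{\mathrm{ASE}}(f) = \big(G(f) - 1\big)\, F\, h\, (f_c + \Delta f) \quad [\mathrm{W/THz}]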
|
||||
|
||||
|
||||
def compute_edfa_profile(gain_zero, gain_tilting, noise_fig, central_freq, freq):
|
||||
""" compute_edfa_profile evaluates the gain profile and the ASE spectral density at the frequencies freq.
|
||||
|
||||
:param gain_zero: the gain at f=0 in dB. Scalar
|
||||
:param gain_tilting: the gain tilt in dB/THz. Scalar
|
||||
:param noise_fig: the amplifier noise figure in dB. Scalar
|
||||
:param central_freq: the central frequency of the WDM comb. Scalar
|
||||
:param freq: the baseband frequencies at which the ASE noise is computed in THz. Array
|
||||
:return: gain: the gain profile in dB
|
||||
:return: g_ase: the ase noise profile in W/THz
|
||||
"""
|
||||
gain = compute_gain_profile(gain_zero, gain_tilting, freq)
|
||||
g_ase = compute_ase_noise(noise_fig, gain, central_freq, freq)
|
||||
|
||||
return gain, g_ase
|
||||
|
||||
|
||||
def compute_attenuation_profile(a_zero, a_tilting, freq):
|
||||
"""compute_attenuation_profile returns the attenuation profile at the frequencies freq
|
||||
|
||||
:param a_zero: the attenuation [dB] @ the baseband central frequency. Scalar
|
||||
:param a_tilting: the attenuation tilt in dB/THz. Scalar
|
||||
:param freq: the baseband frequencies at which attenuation is computed [THz]. Array
|
||||
:return: attenuation: the attenuation profile in dB
|
||||
"""
|
||||
|
||||
if len(freq):
|
||||
attenuation = a_zero + a_tilting * freq
|
||||
|
||||
# abs in order to avoid ambiguity due to the sign convention
|
||||
attenuation = abs(attenuation)
|
||||
else:
|
||||
attenuation = []
|
||||
|
||||
return attenuation
|
||||
|
||||
|
||||
def passive_component(spectrum, a_zero, a_tilting, freq):
|
||||
"""passive_component updates the input spectrum with the attenuation described by a_zero and a_tilting
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param a_zero: attenuation at the central frequency [dB]. Scalar
|
||||
:param a_tilting: attenuation tilting [dB/THz]. Scalar
|
||||
:param freq: the baseband frequency of each WDM channel [THz]. Array
|
||||
:return: None
|
||||
"""
|
||||
attenuation_db = compute_attenuation_profile(a_zero, a_tilting, freq)
|
||||
attenuation_lin = 10 ** np.divide(-abs(attenuation_db), 10.0)
|
||||
|
||||
for index, s in enumerate(spectrum['signals']):
|
||||
spectrum['signals'][index]['p_ch'] *= attenuation_lin[index]
|
||||
spectrum['signals'][index]['p_nli'] *= attenuation_lin[index]
|
||||
spectrum['signals'][index]['p_ase'] *= attenuation_lin[index]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def optical_amplifier(spectrum, gain_zero, gain_tilting, noise_fig, central_freq, freq, b_eq):
|
||||
"""optical_amplifier updates the input spectrum with the gain described by gain_zero and gain_tilting plus ASE noise
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param gain_zero: gain at the central frequency [dB]. Scalar
|
||||
:param gain_tilting: gain tilting [dB/THz]. Scalar
|
||||
:param noise_fig: the noise figure of the amplifier [dB]. Scalar
|
||||
:param central_freq: the central frequency of the optical band [THz]. Scalar
|
||||
:param freq: the central frequency of each WDM channel [THz]. Array
|
||||
:param b_eq: the equivalent -3 dB bandwidth of each WDM channel [THZ]. Array
|
||||
:return: None
|
||||
"""
|
||||
|
||||
gain_db, g_ase = compute_edfa_profile(gain_zero, gain_tilting, noise_fig, central_freq, freq)
|
||||
|
||||
p_ase = np.multiply(g_ase, b_eq)
|
||||
|
||||
gain_lin = 10 ** np.divide(gain_db, 10.0)
|
||||
|
||||
for index, s in enumerate(spectrum['signals']):
|
||||
spectrum['signals'][index]['p_ch'] *= gain_lin[index]
|
||||
spectrum['signals'][index]['p_nli'] *= gain_lin[index]
|
||||
spectrum['signals'][index]['p_ase'] *= gain_lin[index]
|
||||
spectrum['signals'][index]['p_ase'] += p_ase[index]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def fiber(spectrum, fiber_param, fiber_length, f_ch, b_ch, roll_off, control_param):
|
||||
""" fiber updates spectrum with the effects of the fiber
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param fiber_param: Dictionary with the parameters of the fiber
|
||||
fiber_param['alpha']: Fiber loss coefficient in dB/km. Scalar
|
||||
fiber_param['beta_2']: Fiber dispersion coefficient in ps/THz/km. Scalar
|
||||
fiber_param['n_2']: second-order nonlinear refractive index [m^2/W]. Scalar
|
||||
fiber_param['a_eff']: the effective area of the fiber [um^2]. Scalar
|
||||
:param fiber_length: the span length [km]. Scalar
|
||||
:param f_ch: the baseband frequencies of the WDM channels [THz]. Array
|
||||
:param b_ch: the -3 dB bandwidth of each WDM channel [THz]. Array
|
||||
:param roll_off: the roll off of each WDM channel. Array
|
||||
:param control_param: Dictionary with the control parameters
|
||||
control_param['save_each_comp']: a boolean flag. If true, it saves in output folder one spectrum file at
|
||||
the output of each component, otherwise it saves just the last spectrum. Boolean
|
||||
control_param['is_linear']: a boolean flag. If true, it doesn't compute NLI; if false, OLE will consider
|
||||
NLI. Boolean
|
||||
control_param['is_analytic']: a boolean flag. If true, the NLI is computed through the analytic
|
||||
formula, otherwise it uses the double integral. Warning: the double integral is very slow. Boolean
|
||||
control_param['points_not_interp']: if the double integral is used, it indicates how many points are
|
||||
calculated, others will be interpolated. Scalar
|
||||
control_param['kind_interp']: the interpolation method when double integral is used. String
|
||||
control_param['th_fwm']: the threshold of the four wave mixing efficiency for the double integral. Scalar
|
||||
control_param['n_points']: number of points in the high FWM efficiency region in which the double
|
||||
integral is computed. Scalar
|
||||
control_param['n_points_min']: number of points in which the double integral is computed in the low FWM
|
||||
efficiency region. Scalar
|
||||
control_param['n_cores']: number of cores for parallel computation [not yet implemented]. Scalar
|
||||
:return: None
|
||||
"""
|
||||
|
||||
n_cores = control_param['n_cores']
|
||||
|
||||
# Evaluation of NLI
|
||||
if not control_param['is_linear']:
|
||||
num_ch = len(spectrum['signals'])
|
||||
spectrum_param = {
|
||||
'num_ch': num_ch,
|
||||
'f_ch': f_ch,
|
||||
'b_ch': b_ch,
|
||||
'roll_off': roll_off
|
||||
}
|
||||
|
||||
p_ch = np.zeros(num_ch)
|
||||
for index, signal in enumerate(spectrum['signals']):
|
||||
p_ch[index] = signal['p_ch']
|
||||
|
||||
spectrum_param['power'] = p_ch
|
||||
fiber_param['span_length'] = fiber_length
|
||||
|
||||
nli_cmp, f_nli_cmp, nli_int, f_nli_int = gn_model(spectrum_param, fiber_param, control_param, n_cores)
|
||||
f_nli = np.concatenate((f_nli_cmp, f_nli_int))
|
||||
order = np.argsort(f_nli)
|
||||
g_nli = np.concatenate((nli_cmp, nli_int))
|
||||
g_nli = np.array(g_nli)[order]
|
||||
|
||||
p_nli = np.multiply(g_nli, b_ch)
|
||||
|
||||
a_zero = fiber_param['alpha'] * fiber_length
|
||||
a_tilting = fiber_param['alpha_1st'] * fiber_length
|
||||
|
||||
# Apply attenuation
|
||||
passive_component(spectrum, a_zero, a_tilting, f_ch)
|
||||
|
||||
# Apply NLI
|
||||
if not control_param['is_linear']:
|
||||
for index, s in enumerate(spectrum['signals']):
|
||||
spectrum['signals'][index]['p_nli'] += p_nli[index]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_frequencies_wdm(spectrum, sys_param):
|
||||
""" the function computes the central frequency of the WDM comb and the frequency of each channel.
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:param sys_param: a dictionary containing the system parameters:
|
||||
'f0': the starting frequency, i.e the frequency of the first spectral slot [THz]
|
||||
'ns': the number of spectral slots. The space between two slots is 6.25 GHz
|
||||
:return: f_cent: the central frequency of the WDM comb [THz]
|
||||
:return: f_ch: the baseband frequency of each WDM channel [THz]
|
||||
"""
|
||||
|
||||
delta_f = 6.25E-3
|
||||
# Evaluate the central frequency
|
||||
f0 = sys_param['f0']
|
||||
ns = sys_param['ns']
|
||||
|
||||
f_cent = f0 + ((ns // 2.0) * delta_f)
|
||||
|
||||
# Evaluate the baseband frequencies
|
||||
n_ch = spectrum['laser_position'].count(1)
|
||||
f_ch = np.zeros(n_ch)
|
||||
count = 0
|
||||
for index, bool_laser in enumerate(spectrum['laser_position']):
|
||||
if bool_laser:
|
||||
f_ch[count] = (f0 - f_cent) + delta_f * index
|
||||
count += 1
|
||||
|
||||
return f_cent, f_ch
|
||||
|
||||
|
||||
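As a quick sanity check of the arithmetic above, with hypothetical values f0 = 193.0 THz, ns = 8 and a single laser in slot index 3:

sys_param = {'f0': 193.0, 'ns': 8}
spectrum = {'laser_position': [0, 0, 0, 1, 0, 0, 0, 0]}
f_cent, f_ch = get_frequencies_wdm(spectrum, sys_param)
# f_cent = 193.0 + (8 // 2.0) * 6.25e-3 = 193.025 THz
# f_ch[0] = (193.0 - 193.025) + 6.25e-3 * 3 = -0.00625 THz (baseband, relative to f_cent)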
def get_spectrum_param(spectrum):
|
||||
""" the function returns the number of WDM channels and 3 arrays containing the power, the equivalent bandwidth
|
||||
and the roll off of each WDM channel.
|
||||
|
||||
:param spectrum: the WDM spectrum to be attenuated. List of dictionaries
|
||||
:return: power: the power of each WDM channel [W]
|
||||
:return: b_eq: the equivalent bandwidth of each WDM channel [THz]
|
||||
:return: roll_off: the roll off of each WDM channel
|
||||
:return: p_ase: the power of the ASE noise [W]
|
||||
:return: p_nli: the power of NLI [W]
|
||||
:return: n_ch: the number of WDM channels
|
||||
"""
|
||||
|
||||
n_ch = spectrum['laser_position'].count(1)
|
||||
roll_off = np.zeros(n_ch)
|
||||
b_eq = np.zeros(n_ch)
|
||||
power = np.zeros(n_ch)
|
||||
p_ase = np.zeros(n_ch)
|
||||
p_nli = np.zeros(n_ch)
|
||||
for index, signal in enumerate(spectrum['signals']):
|
||||
b_eq[index] = signal['b_ch']
|
||||
roll_off[index] = signal['roll_off']
|
||||
power[index] = signal['p_ch']
|
||||
p_ase[index] = signal['p_ase']
|
||||
p_nli[index] = signal['p_nli']
|
||||
|
||||
return power, b_eq, roll_off, p_ase, p_nli, n_ch
|
||||
|
||||
|
||||
def change_component_ref(f_ref, link, fibers):
|
||||
""" it updates the reference frequency of OA gain, PC attenuation and fiber attenuation coefficient
|
||||
|
||||
:param f_ref: the new reference frequency [THz]. Scalar
|
||||
:param link: the link structure. A list in which each element indicates one link component (PC, OA or fiber). List
|
||||
:param fibers: a dictionary containing the description of each fiber type. Dictionary
|
||||
:return: None
|
||||
"""
|
||||
|
||||
light_speed = 3e8 # [m/s]
|
||||
|
||||
# Change reference to the central frequency f_cent for OA and PC
|
||||
for index, component in enumerate(link):
|
||||
if component['comp_cat'] == 'PC':
|
||||
|
||||
old_loss = component['loss']
|
||||
delta_loss = component['loss_tlt']
|
||||
old_ref = component['ref_freq']
|
||||
new_loss = old_loss + delta_loss * (f_ref - old_ref)
|
||||
|
||||
link[index]['ref_freq'] = f_ref
|
||||
link[index]['loss'] = new_loss
|
||||
|
||||
elif component['comp_cat'] == 'OA':
|
||||
|
||||
old_gain = component['gain']
|
||||
delta_gain = component['gain_tlt']
|
||||
old_ref = component['ref_freq']
|
||||
new_gain = old_gain + delta_gain * (f_ref - old_ref)
|
||||
|
||||
link[index]['ref_freq'] = f_ref
|
||||
link[index]['gain'] = new_gain
|
||||
|
||||
elif component['comp_cat'] != 'fiber':
|
||||
|
||||
error_string = 'Error in link structure: the ' + str(index+1) + '-th component has an unknown category \n'\
|
||||
+ 'allowed values are (case sensitive): PC, OA and fiber'
|
||||
print(error_string)
|
||||
|
||||
# Change reference to the central frequency f_cent for fiber
|
||||
for fib_type in fibers:
|
||||
old_ref = fibers[fib_type]['reference_frequency']
|
||||
old_alpha = fibers[fib_type]['alpha']
|
||||
alpha_1st = fibers[fib_type]['alpha_1st']
|
||||
new_alpha = old_alpha + alpha_1st * (f_ref - old_ref)
|
||||
|
||||
fibers[fib_type]['reference_frequency'] = f_ref
|
||||
fibers[fib_type]['alpha'] = new_alpha
|
||||
|
||||
fibers[fib_type]['gamma'] = (2 * np.pi) * (f_ref / light_speed) * \
|
||||
(fibers[fib_type]['n_2'] / fibers[fib_type]['a_eff']) * 1e27
|
||||
|
||||
return None
|
||||
|
||||
|
||||
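The 1e27 factor in the gamma expression above compensates for the mixed units: frequency in THz (1e12), a_eff in um^2 (1e12 once converted to m^2 in the denominator) and a per-km rather than per-m coefficient (1e3). A worked example with the SMF values listed in fiber_parameters.py:

import numpy as np

f_ref = 193.5        # THz
light_speed = 3e8    # m/s
n_2 = 2.5E-20        # m^2/W
a_eff = 77.77        # um^2
gamma = (2 * np.pi) * (f_ref / light_speed) * (n_2 / a_eff) * 1e27
# gamma ~ 1.30 1/(W km), a typical value for standard single-mode fiber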
def compute_and_save_osnr(spectrum, flag_save=False, file_name='00', output_path='./output/'):
|
||||
""" Given the spectrum structure, the function returns the linear and non linear OSNR. If the boolean variable
|
||||
flag_save is true, the function also saves the osnr values for the central channel, the osnr for each channel and
|
||||
spectrum in a file with the name file_name, in the folder indicated by output_path
|
||||
|
||||
:param spectrum: the spectrum dictionary containing the laser position (a list of boolean) and the list signals,
|
||||
which is a list of dictionaries (one for each channel) containing:
|
||||
'b_ch': the -3 dB bandwidth of the signal [THz]
|
||||
'roll_off': the roll off of the signal
|
||||
'p_ch': the signal power [W]
|
||||
'p_nli': the equivalent nli power [W]
|
||||
'p_ase': the ASE noise [W]
|
||||
:param flag_save: if True it saves all the data, otherwise it doesn't
|
||||
:param file_name: the name of the file in which the variables are saved
|
||||
:param output_path: the path in which you want to save the file
|
||||
:return: osnr_lin_db: the linear OSNR [dB]
|
||||
:return: osnr_nli_db: the non-linear equivalent OSNR [dB]
|
||||
"""
|
||||
|
||||
# Get the parameters from spectrum
|
||||
p_ch, b_eq, roll_off, p_ase, p_nli, n_ch = get_spectrum_param(spectrum)
|
||||
|
||||
# Compute the linear OSNR
|
||||
if (p_ase == 0).any():
|
||||
osnr_lin = np.zeros(n_ch)
|
||||
for index, p_noise in enumerate(p_ase):
|
||||
if p_noise == 0:
|
||||
osnr_lin[index] = float('inf')
|
||||
else:
|
||||
osnr_lin[index] = p_ch[index] / p_noise
|
||||
|
||||
else:
|
||||
osnr_lin = np.divide(p_ch, p_ase)
|
||||
|
||||
# Compute the non-linear OSNR
|
||||
if ((p_ase + p_nli) == 0).any():
|
||||
osnr_nli = np.zeros(n_ch)
|
||||
for index, p_noise in enumerate(p_ase + p_nli):
|
||||
|
||||
if p_noise == 0:
|
||||
osnr_nli[index] = float('inf')
|
||||
else:
|
||||
osnr_nli[index] = p_ch[index] / p_noise
|
||||
else:
|
||||
osnr_nli = np.divide(p_ch, p_ase + p_nli)
|
||||
|
||||
# Compute linear and non linear OSNR for the central channel
|
||||
ind_c = n_ch // 2
|
||||
osnr_lin_central_channel_db = 10 * np.log10(osnr_lin[ind_c])
|
||||
osnr_nl_central_channel_db = 10 * np.log10(osnr_nli[ind_c])
|
||||
|
||||
# Conversion in dB
|
||||
osnr_lin_db = 10 * np.log10(osnr_lin)
|
||||
osnr_nli_db = 10 * np.log10(osnr_nli)
|
||||
|
||||
# Save spectrum, the non linear OSNR and the linear OSNR
|
||||
out_file_name = output_path + file_name
|
||||
|
||||
if flag_save:
|
||||
|
||||
f = open(out_file_name, 'w')
|
||||
f.write(''.join(('# Output parameters. The values of OSNR are evaluated in the -3 dB channel band', '\n\n')))
|
||||
f.write(''.join(('osnr_lin_central_channel_db = ', str(osnr_lin_central_channel_db), '\n\n')))
|
||||
f.write(''.join(('osnr_nl_central_channel_db = ', str(osnr_nl_central_channel_db), '\n\n')))
|
||||
f.write(''.join(('osnr_lin_db = ', str(osnr_lin_db), '\n\n')))
|
||||
f.write(''.join(('osnr_nl_db = ', str(osnr_nli_db), '\n\n')))
|
||||
f.write(''.join(('spectrum = ', str(spectrum), '\n')))
|
||||
|
||||
f.close()
|
||||
|
||||
return osnr_nli_db, osnr_lin_db
|
||||
|
||||
|
||||
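A small numeric sketch of the OSNR computation performed above, with hypothetical per-channel powers:

import numpy as np

p_ch, p_ase, p_nli = 1e-3, 1e-5, 5e-6                  # W (hypothetical values)
osnr_lin_db = 10 * np.log10(p_ch / p_ase)              # = 20.0 dB
osnr_nli_db = 10 * np.log10(p_ch / (p_ase + p_nli))    # ~ 18.2 dB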
def ole(spectrum, link, fibers, sys_param, control_param, output_path='./output/'):
|
||||
""" The function takes the input spectrum, the link description, the fiber description, the system parameters,
|
||||
the control parameters and a string describing the destination folder of the output files. After the function is
|
||||
executed the spectrum is updated with all the impairments of the link. The function also returns the linear and
|
||||
non linear OSNR, computed in the equivalent bandwidth.
|
||||
|
||||
:param spectrum: the spectrum dictionary containing the laser position (a list of boolean) and the list signals,
|
||||
which is a list of dictionaries (one for each channel) containing:
|
||||
'b_ch': the -3 dB bandwidth of the signal [THz]
|
||||
'roll_off': the roll off of the signal
|
||||
'p_ch': the signal power [W]
|
||||
'p_nli': the equivalent nli power [W]
|
||||
'p_ase': the ASE noise [W]
|
||||
:param link: the link structure. A list in which each element is a dictionary describing one link component
|
||||
(PC, OA or fiber). List
|
||||
:param fibers: fibers is a dictionary containing a dictionary for each kind of fiber. Each dictionary has to report:
|
||||
reference_frequency: the frequency at which the parameters are evaluated [THz]
|
||||
alpha: the attenuation coefficient [dB/km]
|
||||
alpha_1st: the first derivative of alpha indicating the alpha slope [dB/km/THz]
|
||||
set it to zero if you assume a flat attenuation with respect to frequency
|
||||
beta_2: the dispersion coefficient [ps^2/km]
|
||||
n_2: second-order nonlinear refractive index [m^2/W]
|
||||
a typical value is 2.5E-20 m^2/W
|
||||
a_eff: the effective area of the fiber [um^2]
|
||||
:param sys_param: a dictionary containing the general system parameters:
|
||||
f0: the starting frequency of the laser grid used to describe the WDM system
|
||||
ns: the number of 6.25 GHz slots in the grid
|
||||
:param control_param: a dictionary containing the following parameters:
|
||||
save_each_comp: a boolean flag. If true, it saves in the output folder one spectrum file at the output of each
|
||||
component, otherwise it saves just the last spectrum
|
||||
is_linear: a boolean flag. If true, OLE doesn't compute NLI; if false, OLE will consider NLI
|
||||
is_analytic: a boolean flag. If true, the NLI is computed through the analytic formula, otherwise it uses
|
||||
the double integral. Warning: the double integral is very slow.
|
||||
points_not_interp: if the double integral is used, it indicates how many points are calculated; the others will
|
||||
be interpolated
|
||||
kind_interp: a string indicating the interpolation method for the double integral
|
||||
th_fwm: the threshold of the four wave mixing efficiency for the double integral
|
||||
n_points: number of points in which the double integral is computed in the high FWM efficiency region
|
||||
n_points_min: number of points in which the double integral is computed in the low FWM efficiency region
|
||||
n_cores: number of cores for parallel computation [not yet implemented]
|
||||
:param output_path: the path in which the output files are saved. String
|
||||
:return: osnr_nli_db: an array containing the non-linear OSNR [dB], one value for each WDM channel. Array
|
||||
:return: osnr_lin_db: an array containing the linear OSNR [dB], one value for each WDM channel. Array
|
||||
"""
|
||||
|
||||
# Take control parameters
|
||||
flag_save_each_comp = control_param['save_each_comp']
|
||||
|
||||
# Evaluate frequency parameters
|
||||
f_cent, f_ch = get_frequencies_wdm(spectrum, sys_param)
|
||||
|
||||
# Evaluate spectrum parameters
|
||||
power, b_eq, roll_off, p_ase, p_nli, n_ch = get_spectrum_param(spectrum)
|
||||
|
||||
# Change reference to the central frequency f_cent for OA, PC and fibers
|
||||
change_component_ref(f_cent, link, fibers)
|
||||
|
||||
# Emulate the link
|
||||
for component in link:
|
||||
if component['comp_cat'] == 'PC':
|
||||
a_zero = component['loss']
|
||||
a_tilting = component['loss_tlt']
|
||||
|
||||
passive_component(spectrum, a_zero, a_tilting, f_ch)
|
||||
|
||||
elif component['comp_cat'] == 'OA':
|
||||
gain_zero = component['gain']
|
||||
gain_tilting = component['gain_tlt']
|
||||
noise_fig = component['noise_figure']
|
||||
|
||||
optical_amplifier(spectrum, gain_zero, gain_tilting, noise_fig, f_cent, f_ch, b_eq)
|
||||
|
||||
elif component['comp_cat'] == 'fiber':
|
||||
fiber_type = component['fiber_type']
|
||||
fiber_param = fibers[fiber_type]
|
||||
fiber_length = component['length']
|
||||
|
||||
fiber(spectrum, fiber_param, fiber_length, f_ch, b_eq, roll_off, control_param)
|
||||
|
||||
else:
|
||||
error_string = 'Error in link structure: the ' + component['comp_cat'] + ' category is unknown \n' \
|
||||
+ 'allowed values are (case sensitive): PC, OA and fiber'
|
||||
print(error_string)
|
||||
|
||||
if flag_save_each_comp:
|
||||
f_name = 'Output from component ID #' + component['comp_id']
|
||||
osnr_nli_db, osnr_lin_db = \
|
||||
compute_and_save_osnr(spectrum, flag_save=True, file_name=f_name, output_path=output_path)
|
||||
|
||||
osnr_nli_db, osnr_lin_db = \
|
||||
compute_and_save_osnr(spectrum, flag_save=True, file_name='link_output', output_path=output_path)
|
||||
|
||||
return osnr_nli_db, osnr_lin_db
|
||||
|
||||
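A hypothetical driver script for ole(); the import paths below are assumptions based on the module layout introduced in this change (in particular, the module providing the link description and the exact names sys_param and control_param are not shown here):

from gnpy.gnpy import ole                                     # assumed location of ole()
from gnpy.input.spectrum_in import spectrum
from gnpy.configuration.fiber_parameters import fibers
from gnpy.configuration.general_parameters import sys_param, control_param   # assumed names
from gnpy.configuration.link_description import link          # hypothetical module

osnr_nli_db, osnr_lin_db = ole(spectrum, link, fibers, sys_param, control_param,
                               output_path='./output/')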
29
gnpy/input/spectrum_in.py
Normal file
29
gnpy/input/spectrum_in.py
Normal file
@@ -0,0 +1,29 @@
|
||||
# coding=utf-8
|
||||
""" spectrum_in.py describes the input spectrum of OLE, i.e. spectrum.
|
||||
spectrum is a dictionary containing two fields:
|
||||
laser_position: a list of bool indicating if a laser is turned on or not
|
||||
signals: a list of dictionaries, each describing one channel in the WDM comb
|
||||
|
||||
The laser_position is defined with respect to a frequency grid with 6.25 GHz spacing, and the first slot is at the
|
||||
frequency described by the variable f0 in the dictionary sys_param in the file "general_parameters.py"
|
||||
|
||||
Each dictionary element of the list 'signals' describes the profile of a WDM channel:
|
||||
b_ch: the -3 dB channel bandwidth (for a root raised cosine, it is equal to the symbol rate)
|
||||
roll_off: the roll off parameter of the root raised cosine shape
|
||||
p_ch: the channel power [W]
|
||||
p_nli: power of accumulated NLI in b_ch [W]
|
||||
p_ase: power of accumulated ASE noise in b_ch [W]
|
||||
"""
|
||||
|
||||
n_ch = 41
|
||||
|
||||
spectrum = {
|
||||
'laser_position': [0, 0, 0, 1, 0, 0, 0, 0] * n_ch,
|
||||
'signals': [{
|
||||
'b_ch': 0.032,
|
||||
'roll_off': 0.15,
|
||||
'p_ch': 1E-3,
|
||||
'p_nli': 0,
|
||||
'p_ase': 0
|
||||
} for _ in range(n_ch)]
|
||||
}
|
||||
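Note that the laser_position pattern above turns on one laser every 8 slots of 6.25 GHz, i.e. a 41-channel comb on a 50 GHz grid:

delta_f = 6.25e-3                                # THz per slot
slots_per_channel = 8
channel_spacing = slots_per_channel * delta_f    # = 0.05 THz = 50 GHz
n_ch = spectrum['laser_position'].count(1)       # = 41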
150
gnpy/network_elements.py
Normal file
150
gnpy/network_elements.py
Normal file
@@ -0,0 +1,150 @@
|
||||
from networkx import DiGraph, all_simple_paths
|
||||
from collections import defaultdict
|
||||
from itertools import product
|
||||
|
||||
import gnpy
|
||||
from . import utils
|
||||
|
||||
|
||||
class Opath:
|
||||
|
||||
def __init__(self, nw, path):
|
||||
self.nw, self.path = nw, path
|
||||
|
||||
self.edge_list = {(elem, path[en + 1])
|
||||
for en, elem in enumerate(path[:-1])}
|
||||
self.elem_dict = {elem: self.find_io_edges(elem)
|
||||
for elem in self.path}
|
||||
|
||||
def find_io_edges(self, elem):
|
||||
iedges = set(self.nw.g.in_edges(elem) ) & self.edge_list
|
||||
oedges = set(self.nw.g.out_edges(elem)) & self.edge_list
|
||||
return {'in': iedges, 'out': oedges}
|
||||
|
||||
def propagate(self):
|
||||
for elem in self.path:
|
||||
elem.propagate(path=self)
|
||||
|
||||
|
||||
class Network:
|
||||
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.nw_elems = defaultdict(list)
|
||||
self.g = DiGraph()
|
||||
|
||||
for elem in self.config['elements']:
|
||||
ne_type = TYPE_MAP[elem['type']]
|
||||
params = elem.pop('parameters')
|
||||
ne = ne_type(self, **elem, **params)
|
||||
self.nw_elems[ne_type].append(ne)
|
||||
self.g.add_node(ne)
|
||||
|
||||
for gpath in self.config['topology']:
|
||||
for u, v in utils.nwise(gpath):
|
||||
n0 = utils.find_by_node_id(self.g, u)
|
||||
n1 = utils.find_by_node_id(self.g, v)
|
||||
self.g.add_edge(n0, n1, channels=[])
|
||||
|
||||
# define all possible paths between tx's and rx's
|
||||
self.tr_paths = []
|
||||
for tx, rx in product(self.nw_elems[Tx], self.nw_elems[Rx]):
|
||||
for spath in all_simple_paths(self.g, tx, rx):
|
||||
self.tr_paths.append(Opath(self, spath))
|
||||
|
||||
def propagate_all_paths(self):
|
||||
for opath in self.tr_paths:
|
||||
opath.propagate()
|
||||
|
||||
|
||||
class NetworkElement:
|
||||
|
||||
def __init__(self, nw, *, id, type, name, description, **kwargs):
|
||||
self.nw = nw
|
||||
self.id, self.type = id, type
|
||||
self.name, self.description = name, description
|
||||
|
||||
def fetch_edge(self, edge):
|
||||
u, v = edge
|
||||
return self.nw.g[u][v]
|
||||
|
||||
def edge_dict(self, chan, osnr, d_power):
|
||||
return {'frequency': chan['frequency'],
|
||||
'osnr': osnr if osnr else chan['osnr'],
|
||||
'power': chan['power'] + d_power}
|
||||
|
||||
def __repr__(self):
|
||||
return f'NetworkElement(id={self.id}, type={self.type})'
|
||||
|
||||
|
||||
class Fiber(NetworkElement):
|
||||
|
||||
def __init__(self, *args, length, loss, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.length = length
|
||||
self.loss = loss
|
||||
|
||||
def propagate(self, path):
|
||||
attn = self.length * self.loss
|
||||
|
||||
for oedge in path.elem_dict[self]['out']:
|
||||
edge = self.fetch_edge(oedge)
|
||||
for pedge in (self.fetch_edge(x)
|
||||
for x in path.elem_dict[self]['in']):
|
||||
for chan in pedge['channels']:
|
||||
dct = self.edge_dict(chan, None, -attn)
|
||||
edge['channels'].append(dct)
|
||||
|
||||
|
||||
class Edfa(NetworkElement):
|
||||
|
||||
def __init__(self, *args, gain, nf, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.gain = gain
|
||||
self.nf = nf
|
||||
|
||||
def propagate(self, path):
|
||||
gain = self.gain[0]
|
||||
for inedge in path.elem_dict[self]['in']:
|
||||
|
||||
in_channels = self.fetch_edge(inedge)['channels']
|
||||
for chan in in_channels:
|
||||
osnr = utils.chan_osnr(chan, self)
|
||||
for edge in (self.fetch_edge(x)
|
||||
for x in path.elem_dict[self]['out']):
|
||||
dct = self.edge_dict(chan, osnr, gain)
|
||||
edge['channels'].append(dct)
|
||||
|
||||
|
||||
class Tx(NetworkElement):
|
||||
|
||||
def __init__(self, *args, channels, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.channels = channels
|
||||
|
||||
def propagate(self, path):
|
||||
for edge in (self.fetch_edge(x) for x in path.elem_dict[self]['out']):
|
||||
for chan in self.channels:
|
||||
dct = self.edge_dict(chan, None, 0)
|
||||
edge['channels'].append(dct)
|
||||
|
||||
|
||||
class Rx(NetworkElement):
|
||||
|
||||
def __init__(self, *args, sensitivity, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.sensitivity = sensitivity
|
||||
|
||||
def propagate(self, path):
|
||||
self.channels = {}
|
||||
for iedge in path.elem_dict[self]['in']:
|
||||
edge = self.fetch_edge(iedge)
|
||||
self.channels[path] = edge['channels']
|
||||
|
||||
|
||||
TYPE_MAP = {
|
||||
'fiber': Fiber,
|
||||
'tx': Tx,
|
||||
'rx': Rx,
|
||||
'edfa': Edfa,
|
||||
}
|
||||
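A minimal sketch of how these classes might be driven. The config dict shape is inferred from the constructors above and the values are illustrative (frequency in THz and power in dBm, as utils.chan_osnr appears to expect):

from gnpy.network_elements import Network, Rx

config = {
    'elements': [
        {'id': 'tx1', 'type': 'tx', 'name': 'Tx 1', 'description': '',
         'parameters': {'channels': [{'frequency': 193.1, 'power': 0.0, 'osnr': 40.0}]}},
        {'id': 'f1', 'type': 'fiber', 'name': 'Span 1', 'description': '',
         'parameters': {'length': 80, 'loss': 0.2}},      # 80 km * 0.2 dB/km = 16 dB loss
        {'id': 'a1', 'type': 'edfa', 'name': 'Amp 1', 'description': '',
         'parameters': {'gain': [16], 'nf': [5.5]}},      # gain compensates the span loss
        {'id': 'rx1', 'type': 'rx', 'name': 'Rx 1', 'description': '',
         'parameters': {'sensitivity': -20}},
    ],
    'topology': [['tx1', 'f1', 'a1', 'rx1']],
}

nw = Network(config)
nw.propagate_all_paths()
for rx in nw.nw_elems[Rx]:
    print(rx.channels)    # channels received over each simple path ending at this Rx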
File diff suppressed because one or more lines are too long
27
gnpy/output/2017-08-04_17-49-28/link_output
Normal file
27
gnpy/output/2017-08-04_17-49-28/link_output
Normal file
File diff suppressed because one or more lines are too long
123
gnpy/sandbox/incoherent_gn.py
Normal file
123
gnpy/sandbox/incoherent_gn.py
Normal file
@@ -0,0 +1,123 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Tue Dec 27 16:16:49 2016
|
||||
|
||||
@author: briantaylor
|
||||
"""
|
||||
from numpy import exp, pi
|
||||
import numpy as np
|
||||
from scipy.integrate import dblquad
|
||||
|
||||
|
||||
def ign_rho(f, span, f1, f2):
|
||||
"""
|
||||
This form of \\rho assumes lumped EDFA-like amplification. This function is
|
||||
also known as the link function.
|
||||
|
||||
Inputs:
|
||||
f = frequency array
|
||||
f1, f2 frequency bounds used to create a grid.
|
||||
span = the span object as defined by the Span class.
|
||||
ign_rho expects several parameters from Span in order to calculate the
|
||||
\\rho function.
|
||||
|
||||
This form currently sets \\beta_3 in the denominator to zero. This
|
||||
equation is taken from EQ[6], page 103 of:
|
||||
|
||||
The GN-Model of Fiber Non-Linear Propagation and its Applications
|
||||
P. Poggiolini;G. Bosco;A. Carena;V. Curri;Y. Jiang;F. Forghieri (2014)
|
||||
|
||||
Version used for this code came from:
|
||||
http://porto.polito.it/2542088/
|
||||
|
||||
TODO: Update the docstring with the IGN rho in LaTeX form
|
||||
TODO: Fix num length
|
||||
|
||||
"""
|
||||
num = 1 - exp(-2 * span.alpha * span.length) * \
|
||||
exp((1j * 4 * pi**2) * (f1 - f) * (f2 - f) * span.beta2 * span.length)
|
||||
den = 2 * span.alpha - (1j * 4 * pi**2) * (f1 - f) * (f2 - f) * span.beta2
|
||||
rho = np.abs(num / den) * span.leff**-2
|
||||
return rho
|
||||
|
||||
|
||||
def ign_function(f, span, f1, f2):
|
||||
"""
|
||||
This creates the integrand for the incoherent Gaussian noise reference
|
||||
function (IGNRF). It assumes \\rho for lumped EDFA-like amplification.
|
||||
|
||||
Inputs:
|
||||
f = frequency array
|
||||
f1, f2 frequency bounds used to create a grid.
|
||||
span = the span object as defined by the Span class.
|
||||
|
||||
This
|
||||
equation is taken from EQ[11], page 104 of:
|
||||
The GN-Model of Fiber Non-Linear Propagation and its Applications
|
||||
P. Poggiolini;G. Bosco;A. Carena;V. Curri;Y. Jiang;F. Forghieri (2014)
|
||||
|
||||
Version used for this code came from:
|
||||
http://porto.polito.it/2542088/
|
||||
|
||||
TODO: Update the docstring with the IGN rho in LaTeX form
|
||||
"""
|
||||
mult_coefs = 16 / 27 * (span.gamma ** 2) * span.nchan
|
||||
ign = mult_coefs * span.psd(f1) * span.psd(f2) * span.psd(f1 + f2 - f) * \
|
||||
ign_rho(f, span, f1, f2)
|
||||
return ign
|
||||
|
||||
|
||||
def integrate_ign(span, f1, f2, f, options=None):
|
||||
"""
|
||||
integrate_ign integrates the ign function defined in ign_function.
|
||||
It uses scipy.integrate.dblquad to perform the double integral.
|
||||
|
||||
The GN model is integrated over 3 distinct regions and the result is then
|
||||
summed.
|
||||
"""
|
||||
|
||||
"""
|
||||
func : callable
|
||||
A Python function or method of at least two variables: y must be the first
|
||||
argument and x the second argument.
|
||||
a, b : float
|
||||
The limits of integration in x: a < b
|
||||
gfun : callable
|
||||
The lower boundary curve in y which is a function taking a single floating
|
||||
point argument (x) and returning a floating point result: a lambda function
|
||||
can be useful here.
|
||||
hfun : callable
|
||||
The upper boundary curve in y (same requirements as gfun).
|
||||
args : sequence, optional
|
||||
Extra arguments to pass to func.
|
||||
epsabs : float, optional
|
||||
Absolute tolerance passed directly to the inner 1-D quadrature integration.
|
||||
Default is 1.49e-8.
|
||||
epsrel : float, optional
|
||||
Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
|
||||
|
||||
dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-08, epsrel=1.49e-08)
|
||||
|
||||
|
||||
|
||||
Definition : quad(func, a, b, args=(), full_output=0, epsabs=1.49e-08,
|
||||
epsrel=1.49e-08, limit=50, points=None, weight=None,
|
||||
wvar=None, wopts=None, maxp1=50, limlst=50)
|
||||
|
||||
|
||||
r1 = integral2(@(f1,f2) incoherent_inner(f, link, f1, f2),...
|
||||
max_lower_bound, max_upper_bound, mid_lower_bound, mid_upper_bound, ...
|
||||
'RelTol', model.RelTol, 'AbsTol', model.AbsTol_incoherent);
|
||||
|
||||
"""
|
||||
|
||||
max_lower_bound = np.min(span.psd)
|
||||
max_upper_bound = np.max(span.psd)
|
||||
mid_lower_bound = f - span.model.bound
|
||||
mid_upper_bound = f + span.model.bound
|
||||
|
||||
return [max_lower_bound, max_upper_bound, mid_lower_bound, mid_upper_bound]
|
||||
|
||||
|
||||
def integrate_hyperbolic(span, f1, f2, f, options=None):
|
||||
return None
|
||||
48
gnpy/sandbox/network_element.py
Normal file
48
gnpy/sandbox/network_element.py
Normal file
@@ -0,0 +1,48 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Mon Jun 19 19:08:37 2017
|
||||
|
||||
@author: briantaylor
|
||||
"""
|
||||
from uuid import uuid4
|
||||
|
||||
|
||||
class NetworkElement:
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
"""
|
||||
self.direction = [("E", "Z"), ("E", "Z"), ("E", "Z"), ("W", "Z")]
|
||||
self.port_mapping = [(1, 5), (2, 5), (3, 5), (4, 5)]
|
||||
self.uid = uuid4()
|
||||
self.coordinates = (29.9792, 31.1342)
|
||||
"""
|
||||
try:
|
||||
for key in ('port_mapping', 'direction', 'coordinates', 'name',
|
||||
'description', 'manufacturer', 'model', 'sn', 'id'):
|
||||
if key in kwargs:
|
||||
setattr(self, key, kwargs[key])
|
||||
else:
|
||||
setattr(self, key, None)
|
||||
# print('No Value defined for :', key)
|
||||
# TODO: add logging functionality
|
||||
except KeyError as e:
|
||||
if 'name' in kwargs:
|
||||
s = kwargs['name']
|
||||
print('Missing Required Network Element Key!', 'name:=', s)
|
||||
# TODO Put log here instead of print
|
||||
print(e)
|
||||
raise
|
||||
|
||||
def get_output_ports(self):
|
||||
"""Translate the port mapping into list of output ports
|
||||
"""
|
||||
return None
|
||||
|
||||
def get_input_ports(self):
|
||||
"""Translate the port mapping into list of output ports
|
||||
"""
|
||||
return None
|
||||
|
||||
def __repr__(self):
|
||||
return self.__class__.__name__
|
||||
159
gnpy/sandbox/optical_elements.py
Normal file
159
gnpy/sandbox/optical_elements.py
Normal file
@@ -0,0 +1,159 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Wed Dec 21 15:09:47 2016
|
||||
|
||||
@author: briantaylor
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from gnpy.constants import c, h
|
||||
|
||||
|
||||
class NetworkElement:
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
"""
|
||||
self.direction = [("E", "Z"), ("E", "Z"), ("E", "Z"), ("W", "Z")]
|
||||
self.port_mapping = [(1, 5), (2, 5), (3, 5), (4, 5)]
|
||||
self.uid = uuid4()
|
||||
self.coordinates = (29.9792, 31.1342)
|
||||
"""
|
||||
try:
|
||||
for key in ('port_mapping', 'direction', 'coordinates', 'name',
|
||||
'description', 'manufacturer', 'model', 'sn', 'id'):
|
||||
if key in kwargs:
|
||||
setattr(self, key, kwargs[key])
|
||||
else:
|
||||
setattr(self, key, None)
|
||||
# print('No Value defined for :', key)
|
||||
# TODO: add logging functionality
|
||||
except KeyError as e:
|
||||
if 'name' in kwargs:
|
||||
s = kwargs['name']
|
||||
print('Missing Required Network Element Key!', 'name:=', s)
|
||||
# TODO Put log here instead of print
|
||||
print(e)
|
||||
raise
|
||||
|
||||
def get_output_ports(self):
|
||||
"""Translate the port mapping into list of output ports
|
||||
"""
|
||||
return None
|
||||
|
||||
def get_input_ports(self):
|
||||
"""Translate the port mapping into list of output ports
|
||||
"""
|
||||
return None
|
||||
|
||||
def __repr__(self):
|
||||
return self.__class__.__name__
|
||||
|
||||
|
||||
class Edfa(NetworkElement):
class_counter = 0
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
'''Reads in configuration data checking for keys. Sets those attributes
|
||||
for each element that exists.
|
||||
conventions:
|
||||
units are SI except where noted below (meters, seconds, Hz)
|
||||
rbw=12.5 GHz today.
|
||||
TODO add unit checking so inputs can be added in conventional
|
||||
nm units.
|
||||
nfdB = noise figure in dB units
|
||||
psatdB = saturation power in dB units
|
||||
gaindB = gain in dB units
|
||||
pdgdB = polarization dependent gain in dB
|
||||
rippledB = gain ripple in dB
|
||||
'''
|
||||
try:
|
||||
for key in ('gaindB', 'nfdB', 'psatdB', 'rbw', 'wavelengths',
|
||||
'pdgdB', 'rippledB', 'id', 'node', 'location'):
|
||||
if key in kwargs:
|
||||
setattr(self, key, kwargs[key])
|
||||
elif key == 'id':
|
||||
setattr(self, 'id', Edfa.class_counter)
|
||||
else:
|
||||
setattr(self, key, None)
|
||||
print('No Value defined for :', key)
|
||||
self.pas = [(h*c/ll)*self.rbw*1e9 for ll in self.wavelengths]
|
||||
|
||||
except KeyError as e:
|
||||
if 'name' in kwargs:
|
||||
s = kwargs['name']
|
||||
print('Missing Edfa Input Key!', 'name:=', s)
|
||||
print(e)
|
||||
raise
|
||||
|
||||
|
||||
class Fiber(NetworkElement):
|
||||
class_counter = 0
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
""" Reads in configuration data checking for keys. Sets those
|
||||
attributes for each element that exists.
|
||||
conventions:
|
||||
units are SI (meters, seconds, Hz) except where noted below
|
||||
rbw=12.5 GHz today. TODO add unit checking so inputs can be added
|
||||
in conventional nm units.
|
||||
nf_db = noise figure in dB units
|
||||
psat_db = saturation power in dB units
|
||||
gain_db = gain in dB units
|
||||
pdg_db = polarization dependent gain in dB
|
||||
ripple_db = gain ripple in dB
|
||||
"""
|
||||
try:
|
||||
for key in ('fiber_type', 'attenuationdB', 'span_length',
|
||||
'dispersion', 'wavelengths', 'id', 'name', 'location',
|
||||
'direction', 'launch_power', 'rbw'):
|
||||
if key in kwargs:
|
||||
setattr(self, key, kwargs[key])
|
||||
elif key == 'id':
|
||||
setattr(self, 'id', Fiber.class_counter)
|
||||
Fiber.class_counter += 1
|
||||
else:
|
||||
setattr(self, key, None)
|
||||
print('No Value defined for :', key)
|
||||
except KeyError as e:
|
||||
if 'name' in kwargs:
|
||||
s = kwargs['name']
|
||||
print('Missing Span Input Key!', 'name:=', s)
|
||||
print(e)
|
||||
raise
|
||||
|
||||
def effective_length(self, loss_coef):
|
||||
alpha_dict = self.dbkm_2_lin(loss_coef)
|
||||
alpha = alpha_dict['alpha_acoef']
|
||||
leff = 1 - np.exp(-2 * alpha * self.span_length)
|
||||
return leff
|
||||
|
||||
def asymptotic_length(self, loss_coef):
|
||||
alpha_dict = self.dbkm_2_lin(loss_coef)
|
||||
alpha = alpha_dict['alpha_acoef']
|
||||
aleff = 1/(2 * alpha)
|
||||
return aleff
|
||||
|
||||
|
||||
|
||||
def beta2(self, dispersion, ref_wavelength=None):
|
||||
""" Returns beta2 from dispersion parameter. Dispersion is entered in
|
||||
ps/nm/km. Dispersion can be a numpy array or a single value. If a
|
||||
value ref_wavelength is not entered 1550e-9m will be assumed.
|
||||
ref_wavelength can be a numpy array.
|
||||
"""
|
||||
if ref_wavelength is None:
|
||||
ref_wavelength = 1550e-9
|
||||
wl = ref_wavelength
|
||||
D = np.abs(dispersion)
|
||||
b2 = (10**21) * (wl**2) * D / (2 * np.pi * c)
|
||||
# 10^21 scales to ps^2/km
|
||||
return b2
|
||||
|
||||
# TODO
|
||||
def generic_span(self):
|
||||
""" calculates a generic version that shows how all the functions of
|
||||
the class are used. It makes the following assumptions about the span:
|
||||
"""
|
||||
return
|
||||
|
||||
def __repr__(self):
|
||||
return f'{self.__class__.__name__}({self.span_length}km)'
|
||||
26
gnpy/sandbox/sandbox.py
Normal file
26
gnpy/sandbox/sandbox.py
Normal file
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Wed Jun 28 14:29:12 2017
|
||||
|
||||
@author: briantaylor
|
||||
"""
|
||||
|
||||
import networkx as nx
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
G = nx.Graph()
|
||||
|
||||
G.add_node(1)
|
||||
|
||||
G.add_nodes_from([2, 3])
|
||||
H = nx.path_graph(10)
|
||||
G.add_nodes_from(H)
|
||||
G = nx.path_graph(8)
|
||||
nx.draw_spring(G)
|
||||
plt.show()
|
||||
|
||||
|
||||
|
||||
class NetworkElement:
    pass  # placeholder sketch; networkx does not expose an nx.node base class to subclass
|
||||
94
gnpy/sandbox/span.py
Normal file
94
gnpy/sandbox/span.py
Normal file
@@ -0,0 +1,94 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Tue Dec 27 11:58:40 2016
|
||||
|
||||
@author: briantaylor
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from scipy.constants import h, c
|
||||
from numpy import array
|
||||
from network_element import NetworkElement
|
||||
|
||||
|
||||
class Span(NetworkElement):
|
||||
class_counter = 0
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
""" Reads in configuration data checking for keys. Sets those
|
||||
attributes for each element that exists.
|
||||
conventions:
|
||||
units are SI (meters, seconds, Hz) except where noted below
|
||||
rbw=12.5 GHz today. TODO add unit checking so inputs can be added
|
||||
in conventional nm units.
|
||||
nf_db = noise figure in dB units
|
||||
psat_db = saturation power in dB units
|
||||
gain_db = gain in dB units
|
||||
pdg_db = polarization dependent gain in dB
|
||||
ripple_db = gain ripple in dB
|
||||
"""
|
||||
try:
|
||||
for key in ('fiber_type', 'attenuationdB', 'span_length',
|
||||
'dispersion', 'wavelengths', 'id', 'name', 'location',
|
||||
'direction', 'launch_power', 'rbw'):
|
||||
if key in kwargs:
|
||||
setattr(self, key, kwargs[key])
|
||||
elif key == 'id':
|
||||
setattr(self, 'id', Span.class_counter)
|
||||
Span.class_counter += 1
|
||||
else:
|
||||
setattr(self, key, None)
|
||||
print('No Value defined for :', key)
|
||||
except KeyError as e:
|
||||
if 'name' in kwargs:
|
||||
s = kwargs['name']
|
||||
print('Missing Span Input Key!', 'name:=', s)
|
||||
print(e)
|
||||
raise
|
||||
|
||||
def effective_length(self, loss_coef):
|
||||
alpha_dict = self.dbkm_2_lin(loss_coef)
|
||||
alpha = alpha_dict['alpha_acoef']
|
||||
leff = 1 - np.exp(-2 * alpha * self.span_length)
|
||||
return leff
|
||||
|
||||
def asymptotic_length(self, loss_coef):
|
||||
alpha_dict = self.dbkm_2_lin(loss_coef)
|
||||
alpha = alpha_dict['alpha_acoef']
|
||||
aleff = 1/(2 * alpha)
|
||||
return aleff
|
||||
|
||||
def dbkm_2_lin(self, loss_coef):
|
||||
""" calculates the linear loss coefficient
|
||||
"""
|
||||
alpha_pcoef = loss_coef
|
||||
alpha_acoef = alpha_pcoef/(2*4.3429448190325184)
|
||||
s = 'alpha_pcoef is linear loss coefficient in [dB/km^-1] units'
|
||||
s = ''.join([s, "alpha_acoef is linear loss field amplitude \
|
||||
coefficient in [km^-1] units"])
|
||||
d = {'alpha_pcoef': alpha_pcoef, 'alpha_acoef': alpha_acoef,
|
||||
'description:': s}
|
||||
return d
|
||||
|
||||
def beta2(self, dispersion, ref_wavelength=None):
|
||||
""" Returns beta2 from dispersion parameter. Dispersion is entered in
|
||||
ps/nm/km. Dispersion can be a numpy array or a single value. If a
|
||||
value ref_wavelength is not entered 1550e-9m will be assumed.
|
||||
ref_wavelength can be a numpy array.
|
||||
"""
|
||||
if ref_wavelength is None:
|
||||
ref_wavelength = 1550e-9
|
||||
wl = ref_wavelength
|
||||
D = np.abs(dispersion)
|
||||
b2 = (10**21) * (wl**2) * D / (2 * np.pi * c)
|
||||
# 10^21 scales to ps^2/km
|
||||
return b2
|
||||
|
||||
# TODO
|
||||
def generic_span(self):
|
||||
""" calculates a generic version that shows how all the functions of
|
||||
the class are used. It makes the following assumptions about the span:
|
||||
|
||||
"""
|
||||
|
||||
return
|
||||
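As a numeric check of the beta2() conversion defined above, using a hypothetical dispersion of 16.7 ps/nm/km at 1550 nm:

import numpy as np
from scipy.constants import c

D, wl = 16.7, 1550e-9                          # ps/nm/km, m
b2 = (10**21) * (wl**2) * D / (2 * np.pi * c)
# b2 ~ 21.3 ps^2/km, consistent with the beta_2 values listed in fiber_parameters.py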
35
gnpy/sandbox/transmit_psd.py
Normal file
35
gnpy/sandbox/transmit_psd.py
Normal file
@@ -0,0 +1,35 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Sat Mar 25 18:46:19 2017
|
||||
|
||||
@author: briantaylor
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
|
||||
def generic_box_psd():
|
||||
"""
|
||||
creates a generic rectangular PSD at the channel spacing and baud rate
|
||||
TODO: convert input to kwargs
|
||||
Input is in THz (for now). Also normalizes the total power to 1 over the
|
||||
band of interest.
|
||||
"""
|
||||
baud = 0.034
|
||||
ffs = np.arange(193.95, 194.5, 0.05)
|
||||
zffs = 1e-6
|
||||
grid = []
|
||||
power = []
|
||||
"""
|
||||
TODO: The implementation below is awful. Please fix.
|
||||
"""
|
||||
for ea in ffs:
|
||||
fl1 = ea - baud/2 - zffs
|
||||
fl = ea - baud/2
|
||||
fr = ea + baud/2
|
||||
fr1 = ea + baud/2 + zffs
|
||||
grid = grid + [fl1, fl, ea, fr, fr1]
|
||||
power = power + [0, 1, 1, 1, 0]
|
||||
grid = np.array(grid)
|
||||
power = np.array(power)/np.sum(power)
|
||||
data = np.hstack((grid, power))
|
||||
return data
|
||||
43
gnpy/utils.py
Normal file
43
gnpy/utils.py
Normal file
@@ -0,0 +1,43 @@
|
||||
import json
|
||||
from gnpy.constants import c, h
|
||||
import numpy as np
|
||||
from itertools import tee, islice
|
||||
|
||||
nwise = lambda g, n=2: zip(*(islice(g, i, None)
|
||||
for i, g in enumerate(tee(g, n))))
|
||||
|
||||
|
||||
def read_config(filepath):
|
||||
with open(filepath, 'r') as f:
|
||||
return json.load(f)
|
||||
|
||||
|
||||
def find_by_node_id(g, nid):
|
||||
# TODO: What if nid is not found in graph (g)?
|
||||
return next(n for n in g.nodes() if n.id == nid)
|
||||
|
||||
|
||||
def dbkm_2_lin(loss_coef):
|
||||
""" calculates the linear loss coefficient
|
||||
"""
|
||||
alpha_pcoef = loss_coef
|
||||
alpha_acoef = alpha_pcoef/(2*4.3429448190325184)
|
||||
s = 'alpha_pcoef is linear loss coefficient in [dB/km^-1] units'
|
||||
s = ''.join([s, "alpha_acoef is linear loss field amplitude \
|
||||
coefficient in [km^-1] units"])
|
||||
d = {'alpha_pcoef': alpha_pcoef, 'alpha_acoef': alpha_acoef,
|
||||
'description:': s}
|
||||
return d
|
||||
|
||||
|
||||
def db_to_lin(val):
|
||||
return 10 ** (val / 10)
|
||||
|
||||
|
||||
def chan_osnr(chan_params, amp_params):
|
||||
in_osnr = db_to_lin(chan_params['osnr'])
|
||||
pin = db_to_lin(chan_params['power']) / 1e3
|
||||
nf = db_to_lin(amp_params.nf[0])
|
||||
ase_cont = nf * h * chan_params['frequency'] * 12.5 * 1e21
|
||||
ret = -10 * np.log10(1 / in_osnr + ase_cont / pin)
|
||||
return ret
|
||||
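A few quick checks of the helpers above (values are illustrative):

list(nwise(['tx1', 'f1', 'a1', 'rx1']))
# -> [('tx1', 'f1'), ('f1', 'a1'), ('a1', 'rx1')], i.e. the consecutive edges of a topology path

dbkm_2_lin(0.2)['alpha_acoef']
# -> ~0.023 km^-1, the field-amplitude loss coefficient for 0.2 dB/km

db_to_lin(3)
# -> ~1.995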
30
tox.ini
30
tox.ini
@@ -1,30 +0,0 @@
|
||||
[tox]
|
||||
envlist = py26, py27, py33, py34, py35, flake8
|
||||
|
||||
[travis]
|
||||
python =
|
||||
3.5: py35
|
||||
3.4: py34
|
||||
3.3: py33
|
||||
2.7: py27
|
||||
2.6: py26
|
||||
|
||||
[testenv:flake8]
|
||||
basepython=python
|
||||
deps=flake8
|
||||
commands=flake8 gnpy
|
||||
|
||||
[testenv]
|
||||
setenv =
|
||||
PYTHONPATH = {toxinidir}
|
||||
deps =
|
||||
-r{toxinidir}/requirements_dev.txt
|
||||
commands =
|
||||
pip install -U pip
|
||||
py.test --basetemp={envtmpdir}
|
||||
|
||||
|
||||
; If you want to make tox run the tests with the same versions, create a
|
||||
; requirements.txt with the pinned versions and uncomment the following lines:
|
||||
; deps =
|
||||
; -r{toxinidir}/requirements.txt
|
||||
@@ -1,127 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Update encrypted deploy password in Travis config file."""
|
||||
|
||||
|
||||
from __future__ import print_function
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
from getpass import getpass
|
||||
import yaml
|
||||
from cryptography.hazmat.primitives.serialization import load_pem_public_key
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
|
||||
|
||||
|
||||
try:
|
||||
from urllib import urlopen
|
||||
except ImportError:
|
||||
from urllib.request import urlopen
|
||||
|
||||
|
||||
GITHUB_REPO = '<TBD>/gnpy'
|
||||
TRAVIS_CONFIG_FILE = os.path.join(
|
||||
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
|
||||
|
||||
|
||||
def load_key(pubkey):
|
||||
"""Load public RSA key.
|
||||
|
||||
Work around keys with incorrect header/footer format.
|
||||
|
||||
Read more about RSA encryption with cryptography:
|
||||
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
|
||||
"""
|
||||
try:
|
||||
return load_pem_public_key(pubkey.encode(), default_backend())
|
||||
except ValueError:
|
||||
# workaround for https://github.com/travis-ci/travis-api/issues/196
|
||||
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
|
||||
return load_pem_public_key(pubkey.encode(), default_backend())
|
||||
|
||||
|
||||
def encrypt(pubkey, password):
|
||||
"""Encrypt password using given RSA public key and encode it with base64.
|
||||
|
||||
The encrypted password can only be decrypted by someone with the
|
||||
private key (in this case, only Travis).
|
||||
"""
|
||||
key = load_key(pubkey)
|
||||
encrypted_password = key.encrypt(password, PKCS1v15())
|
||||
return base64.b64encode(encrypted_password)
|
||||
|
||||
|
||||
def fetch_public_key(repo):
|
||||
"""Download RSA public key Travis will use for this repo.
|
||||
|
||||
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
|
||||
"""
|
||||
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
|
||||
data = json.loads(urlopen(keyurl).read().decode())
|
||||
if 'key' not in data:
|
||||
errmsg = "Could not find public key for repo: {}.\n".format(repo)
|
||||
errmsg += "Have you already added your GitHub repo to Travis?"
|
||||
raise ValueError(errmsg)
|
||||
return data['key']
|
||||
|
||||
|
||||
def prepend_line(filepath, line):
|
||||
"""Rewrite a file adding a line to its beginning."""
|
||||
with open(filepath) as f:
|
||||
lines = f.readlines()
|
||||
|
||||
lines.insert(0, line)
|
||||
|
||||
with open(filepath, 'w') as f:
|
||||
f.writelines(lines)
|
||||
|
||||
|
||||
def load_yaml_config(filepath):
|
||||
"""Load yaml config file at the given path."""
|
||||
with open(filepath) as f:
|
||||
return yaml.load(f)
|
||||
|
||||
|
||||
def save_yaml_config(filepath, config):
|
||||
"""Save yaml config file at the given path."""
|
||||
with open(filepath, 'w') as f:
|
||||
yaml.dump(config, f, default_flow_style=False)
|
||||
|
||||
|
||||
def update_travis_deploy_password(encrypted_password):
|
||||
"""Put `encrypted_password` into the deploy section of .travis.yml."""
|
||||
config = load_yaml_config(TRAVIS_CONFIG_FILE)
|
||||
|
||||
config['deploy']['password'] = dict(secure=encrypted_password)
|
||||
|
||||
save_yaml_config(TRAVIS_CONFIG_FILE, config)
|
||||
|
||||
line = ('# This file was autogenerated and will overwrite'
|
||||
' each time you run travis_pypi_setup.py\n')
|
||||
prepend_line(TRAVIS_CONFIG_FILE, line)
|
||||
|
||||
|
||||
def main(args):
|
||||
"""Add a PyPI password to .travis.yml so that Travis can deploy to PyPI.
|
||||
|
||||
Fetch the Travis public key for the repo, and encrypt the PyPI password
|
||||
with it before adding, so that only Travis can decrypt and use the PyPI
|
||||
password.
|
||||
"""
|
||||
public_key = fetch_public_key(args.repo)
|
||||
password = args.password or getpass('PyPI password: ')
|
||||
update_travis_deploy_password(encrypt(public_key, password.encode()))
|
||||
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
|
||||
|
||||
|
||||
if '__main__' == __name__:
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(description=__doc__)
|
||||
parser.add_argument('--repo', default=GITHUB_REPO,
|
||||
help='GitHub repo (default: %s)' % GITHUB_REPO)
|
||||
parser.add_argument('--password',
|
||||
help='PyPI password (will prompt if not provided)')
|
||||
|
||||
args = parser.parse_args()
|
||||
main(args)
|
||||