API documentation

Main module.

RFR_dict(input_date=None, cache={})

Generates a dictionary containing filenames based on the reference date and other data derived from it.

Parameters:

- input_date (str, default None): The input date in the format "%Y-%m-%d". Defaults to None, which means the current date is used.
- cache (dict, default {}): A dictionary to store intermediate and final results. Defaults to an empty dictionary.

Returns:

- dict: A dictionary containing generated filenames and other data. The dictionary includes the following keys:
  - "input_date": The input date in the format "%Y-%m-%d".
  - "reference_date": The reference date in the format "%Y%m%d".
  - "name_excelfile": The filename for the EIOPA RFR term structures Excel file.
  - "name_excelfile_spreads": The filename for the EIOPA RFR PD Cod Excel file.

Source code in solvency2_data/rfr.py
def RFR_dict(input_date: str = None, cache: dict = {}) -> dict:
    """
    Generates a dictionary containing filenames based on the reference date
    and other data derived from it.

    Args:
        input_date (str, optional): The input date in the format "%Y-%m-%d".
            Defaults to None, which means the current date is used.
        cache (dict, optional): A dictionary to store intermediate and final results.
            Defaults to an empty dictionary.

    Returns:
        dict: A dictionary containing generated filenames and other data.
            The dictionary includes the following keys:
                - "input_date": The input date in the format "%Y-%m-%d".
                - "reference_date": The reference date in the format "%Y%m%d".
                - "name_excelfile": The filename for the EIOPA RFR term structures Excel file.
                - "name_excelfile_spreads": The filename for the EIOPA RFR PD Cod Excel file.
    """
    cache = RFR_reference_date(input_date, cache)
    cache["name_excelfile"] = (
        "EIOPA_RFR_" + cache["reference_date"] + "_Term_Structures" + ".xlsx"
    )
    cache["name_excelfile_spreads"] = (
        "EIOPA_RFR_" + cache["reference_date"] + "_PD_Cod" + ".xlsx"
    )
    return cache
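
A minimal usage sketch, assuming the package is importable as solvency2_data.rfr; it shows the filename keys derived for an end-of-month reference date:

from solvency2_data.rfr import RFR_dict

d = RFR_dict("2023-12-31")
# For an end-of-month input the derived reference date equals the input date
print(d["reference_date"])          # "20231231"
print(d["name_excelfile"])          # "EIOPA_RFR_20231231_Term_Structures.xlsx"
print(d["name_excelfile_spreads"])  # "EIOPA_RFR_20231231_PD_Cod.xlsx"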

RFR_reference_date(input_date=None, cache={})

Calculates the reference date based on the input date or the current date. If no input date is provided or if the input date is in the future, it defaults to the current date. If the current date is before the 5th of the month, it sets the reference date to the last day of the previous month. Otherwise, it sets the reference date to the next day after the input date.

Parameters:

- input_date (str, default None): The input date in the format "%Y-%m-%d". Defaults to None, which means the current date is used.
- cache (dict, default {}): A dictionary to store the calculated reference date. Defaults to an empty dictionary.

Returns:

- dict: A dictionary containing the input date and the reference date. The input date is stored under the key "input_date" in the format "%Y-%m-%d". The reference date is stored under the key "reference_date" in the format "%Y%m%d".

Source code in solvency2_data/rfr.py
def RFR_reference_date(input_date: str = None, cache: dict = {}) -> dict:
    """
    Calculates the reference date based on the input date or the current date.
    If no input date is provided or if the input date is in the future,
    it defaults to the current date. If the current date is before the 5th of the month,
    it sets the reference date to the last day of the previous month.
    Otherwise, it sets the reference date to the next day after the input date.

    Args:
        input_date (str, optional): The input date in the format "%Y-%m-%d".
            Defaults to None, which means the current date is used.
        cache (dict, optional): A dictionary to store the calculated reference date.
            Defaults to an empty dictionary.

    Returns:
        dict: A dictionary containing the input date and the reference date.
            The input date is stored under the key "input_date" in the format "%Y-%m-%d".
            The reference date is stored under the key "reference_date" in the format "%Y%m%d".
    """
    if input_date is not None:
        reference_date = datetime.datetime.strptime(input_date, "%Y-%m-%d")
    else:
        reference_date = None

    if (reference_date is None) or (reference_date > datetime.datetime.today()):
        reference_date = datetime.datetime.today()

        if reference_date.day < 5:
            reference_date = reference_date.replace(day=1) - datetime.timedelta(days=1)
    else:
        reference_date = reference_date + datetime.timedelta(days=1)

    # to do : check if end of month
    reference_date = reference_date.replace(day=1) - datetime.timedelta(days=1)

    cache["input_date"] = reference_date.strftime("%Y-%m-%d")
    cache["reference_date"] = cache["input_date"].replace("-", "")

    return cache
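
A short sketch of the date logic described above (import path assumed to be solvency2_data.rfr; the example dates are assumed to lie in the past):

from solvency2_data.rfr import RFR_reference_date

# An end-of-month input maps to that same month end
print(RFR_reference_date("2024-03-31")["reference_date"])  # "20240331"
# A mid-month input maps to the last day of the previous month
print(RFR_reference_date("2024-03-15")["reference_date"])  # "20240229"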

download_RFR(input_date=None, cache={})

Downloads EIOPA RFR (Risk-Free Rate) files for a given date and saves them locally.

Parameters:

- input_date (str, default None): The input date in the format "%Y-%m-%d". Defaults to None, which means the current date is used.
- cache (dict, default {}): A dictionary to store intermediate and final results. Defaults to an empty dictionary.

Returns:

- dict: A dictionary containing information about the downloaded files and paths. The dictionary includes the following keys:
  - "input_date": The input date in the format "%Y-%m-%d".
  - "reference_date": The reference date in the format "%Y%m%d".
  - "name_excelfile": The filename for the downloaded EIOPA RFR term structures Excel file.
  - "name_excelfile_spreads": The filename for the downloaded EIOPA RFR PD Cod Excel file.
  - "url": The URL from which the files were downloaded.
  - "name_zipfile": The name of the downloaded zip file.
  - "path_excelfile": The path where the Excel files are saved.
  - "path_zipfile": The path where the zip file is saved.

Source code in solvency2_data/rfr.py
def download_RFR(input_date: str = None, cache: dict = {}) -> dict:
    """
    Downloads EIOPA RFR (Risk-Free Rate) files for a given date and saves them locally.

    Args:
        input_date (str, optional): The input date in the format "%Y-%m-%d".
            Defaults to None, which means the current date is used.
        cache (dict, optional): A dictionary to store intermediate and final results.
            Defaults to an empty dictionary.

    Returns:
        dict: A dictionary containing information about the downloaded files and paths.
            The dictionary includes the following keys:
                - "input_date": The input date in the format "%Y-%m-%d".
                - "reference_date": The reference date in the format "%Y%m%d".
                - "name_excelfile": The filename for the downloaded EIOPA RFR term structures Excel file.
                - "name_excelfile_spreads": The filename for the downloaded EIOPA RFR PD Cod Excel file.
                - "url": The URL from which the files were downloaded.
                - "name_zipfile": The name of the downloaded zip file.
                - "path_excelfile": The path where the Excel files are saved.
                - "path_zipfile": The path where the zip file is saved.
    """
    cache = RFR_dict(input_date, cache)

    if not (
        os.path.isfile(join(cache["path_excelfile"], cache["name_excelfile"]))
    ) or not (
        os.path.isfile(join(cache["path_excelfile"], cache["name_excelfile_spreads"]))
    ):
        # determine correct url and zipfile
        cache["url"] = eiopa_link(cache["input_date"], data_type="rfr")
        cache["name_zipfile"] = os.path.basename(cache["url"]).split("filename=")[-1]

        # download file
        request = urlopen(cache["url"])

        # save zip-file
        output = open(join(cache["path_zipfile"], cache["name_zipfile"]), "wb")
        output.write(request.read())
        output.close()

        name_excelfile = None
        name_excelfile_spreads = None
        zip_ref = zipfile.ZipFile(join(cache["path_zipfile"], cache["name_zipfile"]))
        for idx, name in enumerate(zip_ref.namelist()):
            if name.lower() == cache["name_excelfile"].lower():
                name_excelfile = name
            if name.lower() == cache["name_excelfile_spreads"].lower():
                name_excelfile_spreads = name
        if name_excelfile is not None:
            zip_ref.extract(name_excelfile, cache["path_excelfile"])
        if name_excelfile_spreads is not None:
            zip_ref.extract(name_excelfile_spreads, cache["path_excelfile"])
        zip_ref.close()

        # remove zip file
        # os.remove(cache['path_zipfile'] + cache["name_zipfile"])

    return cache
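
A hedged sketch of calling download_RFR directly; the target folder is hypothetical and must be supplied in the cache (the read() function below fills these paths from the configuration instead):

import os
from solvency2_data.rfr import download_RFR

folder = "eiopa_downloads"  # hypothetical local folder for both the zip and Excel files
os.makedirs(folder, exist_ok=True)

cache = {"path_zipfile": folder, "path_excelfile": folder}
cache = download_RFR("2023-12-31", cache)
print(cache["name_excelfile"])  # "EIOPA_RFR_20231231_Term_Structures.xlsx"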

read(input_date=None, path=None)

Reads data from Excel files and stores it in a dictionary.

Parameters:

- input_date (str, default None): The input date in the format "%Y-%m-%d". Defaults to None, which means the current date is used.
- path (str, default None): The path to the directory containing Excel files. If None, it looks for .cfg files in the current directory or the package directory. Defaults to None.

Returns:

- dict: A dictionary containing the read data. The dictionary includes various keys storing downloaded files, metadata, spot data, spreadsheets, etc.

Source code in solvency2_data/rfr.py
def read(input_date=None, path: str = None) -> dict:
    """
    Reads data from Excel files and stores it in a dictionary.

    Args:
        input_date (str, optional): The input date in the format "%Y-%m-%d".
            Defaults to None, which means the current date is used.
        path (str, optional): The path to the directory containing Excel files.
            If None, it looks for .cfg files in the current directory or the package directory.
            Defaults to None.

    Returns:
        dict: A dictionary containing the read data.
            The dictionary includes various keys storing downloaded files, metadata, spot data, spreadsheets, etc.
    """
    if path is None:
        # look in current directory for .cfg file
        # if not exists then take the .cfg file in the package directory
        config = get_config().get("Directories")

        path_zipfile = config.get("zip_files")
        path_excelfile = config.get("excel_files")
    else:
        path_zipfile = path
        path_excelfile = path

    cache = {"path_zipfile": path_zipfile, "path_excelfile": path_excelfile}

    cache = download_RFR(input_date, cache)
    xls = pd.ExcelFile(
        join(cache["path_excelfile"], cache["name_excelfile"]), engine="openpyxl"
    )
    cache = read_meta(xls, cache)
    cache = read_spot(xls, cache)
    xls_spreads = pd.ExcelFile(
        join(cache["path_excelfile"], cache["name_excelfile_spreads"]),
        engine="openpyxl",
    )
    cache = read_spreads(xls_spreads, cache)
    cache = read_govies(xls_spreads, cache)

    xls.close()
    xls_spreads.close()

    return cache
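
A typical call, as a sketch: it assumes the .cfg file points to valid download folders, and that the spot DataFrames carry the column names used in the EIOPA sheets (for example "Euro"):

from solvency2_data.rfr import read

data = read("2023-12-31")
spots = data["RFR_spot_with_VA"]   # DataFrame of spot rates indexed by duration
print(spots["Euro"].head())        # "Euro" assumed to be one of the sheet columns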

read_govies(xls, cache={})

Reads central government fundamental spreads from an Excel file and stores them in a dictionary.

Parameters:

- xls (required): An object representing the Excel file.
- cache (dict, default {}): A dictionary to store the read spreadsheets. Defaults to an empty dictionary.

Returns:

- dict: A dictionary containing the read spreadsheets. The dictionary includes the following key:
  - "central government fundamental spreads": A DataFrame containing central government spreads. The DataFrame includes spreads for various financial attributes indexed by dates.

Source code in solvency2_data/rfr.py
def read_govies(xls, cache: dict = {}) -> dict:
    """
    Reads central government fundamental spreads from an Excel file and stores them in a dictionary.

    Args:
        xls : An object representing the Excel file.
        cache (dict, optional): A dictionary to store the read spreadsheets.
            Defaults to an empty dictionary.

    Returns:
        dict: A dictionary containing the read spreadsheets.
            The dictionary includes the following keys:
                - "central government fundamental spreads": A DataFrame containing central government spreads.
            The DataFrame includes spreads for various financial attributes indexed by dates.
    """
    cache["central government fundamental spreads"] = None
    for name in ["FS_Govts"]:
        if name in xls.sheet_names:
            df = pd.read_excel(
                io=xls,
                sheet_name=name,
                usecols="B:AF",
                nrows=53,
                index_col=0,
                skiprows=9,
            )
            # This line introduces a dependency on the spots
            # df.index = cache['RFR_spot_no_VA'].columns
            cache["central government fundamental spreads"] = df.T

    return cache

read_meta(xls, cache={})

Reads metadata from an Excel file and stores it in a dictionary.

Parameters:

- xls (required): An object representing the Excel file.
- cache (dict, default {}): A dictionary to store the read metadata. Defaults to an empty dictionary.

Returns:

- dict: A dictionary containing the read metadata. The dictionary includes the following key:
  - "meta": A DataFrame containing metadata. The DataFrame is indexed by "meta" and may include information such as headers, column descriptions, and other relevant details.

Source code in solvency2_data/rfr.py
def read_meta(xls, cache: dict = {}) -> dict:
    """
    Reads metadata from an Excel file and stores it in a dictionary.

    Args:
        xls : An object representing the Excel file.
        cache (dict, optional): A dictionary to store the read metadata.
            Defaults to an empty dictionary.

    Returns:
        dict: A dictionary containing the read metadata.
            The dictionary includes the following key:
                - "meta": DataFrame containing metadata.
            The DataFrame includes metadata indexed by "meta" and may include information
            such as headers, column descriptions, and other relevant details.
    """
    df_meta = pd.read_excel(
        xls, sheet_name="RFR_spot_with_VA", header=1, index_col=1, skipfooter=150
    )
    # drop unnamed columns from the excel file
    for col in df_meta.columns:
        if "Unnamed:" in col:
            df_meta = df_meta.drop(col, axis=1)

    df_meta.loc["VA", :].infer_objects(copy=False).fillna(0, inplace=True)
    df_meta = df_meta.iloc[0:8]
    df_meta.index.names = ["meta"]
    df_meta.index = df_meta.index.fillna("Info")

    # Reference date is not strictly part of meta
    # df_append = pd.DataFrame(index=['reference date'],
    #                          columns=df_meta.columns)
    # # df_append.loc['reference date'] = cache["reference_date"]
    # df_meta = df_meta.append(df_append)

    cache["meta"] = df_meta

    return cache

read_spot(xls, cache={})

Reads various spot data from an Excel file and stores them in a dictionary.

Parameters:

- xls (required): An object representing the Excel file.
- cache (dict, default {}): A dictionary to store the read spot data. Defaults to an empty dictionary.

Returns:

- dict: A dictionary containing the read spot data. Each DataFrame contains spot data indexed by duration. The dictionary includes the following keys:
  - "RFR_spot_no_VA": DataFrame containing spot data without VA.
  - "RFR_spot_with_VA": DataFrame containing spot data with VA.
  - "Spot_NO_VA_shock_UP": DataFrame containing spot data without VA with UP shock.
  - "Spot_NO_VA_shock_DOWN": DataFrame containing spot data without VA with DOWN shock.
  - "Spot_WITH_VA_shock_UP": DataFrame containing spot data with VA with UP shock.
  - "Spot_WITH_VA_shock_DOWN": DataFrame containing spot data with VA with DOWN shock.

Source code in solvency2_data/rfr.py
def read_spot(xls, cache: dict = {}) -> dict:
    """
    Reads various spot data from an Excel file and stores them in a dictionary.

    Args:
        xls : An object representing the Excel file.
        cache (dict, optional): A dictionary to store the read spot data.
            Defaults to an empty dictionary.

    Returns:
        dict: A dictionary containing the read spot data.
            The dictionary includes the following keys:
                - "RFR_spot_no_VA": DataFrame containing spot data without VA.
                - "RFR_spot_with_VA": DataFrame containing spot data with VA.
                - "Spot_NO_VA_shock_UP": DataFrame containing spot data without VA with UP shock.
                - "Spot_NO_VA_shock_DOWN": DataFrame containing spot data without VA with DOWN shock.
                - "Spot_WITH_VA_shock_UP": DataFrame containing spot data with VA with UP shock.
                - "Spot_WITH_VA_shock_DOWN": DataFrame containing spot data with VA with DOWN shock.
            Each DataFrame contains spot data indexed by duration.
    """
    for name in [
        "RFR_spot_no_VA",
        "RFR_spot_with_VA",
        "Spot_NO_VA_shock_UP",
        "Spot_NO_VA_shock_DOWN",
        "Spot_WITH_VA_shock_UP",
        "Spot_WITH_VA_shock_DOWN",
    ]:
        if name in xls.sheet_names:
            df = pd.read_excel(
                io=xls, sheet_name=name, header=1, nrows=158, index_col=1
            )
            # drop unnamed columns from the excel file
            for col in df.columns:
                if "Unnamed:" in col:
                    df = df.drop(col, axis=1)
            df.loc["VA"].infer_objects(copy=False).fillna(0, inplace=True)
            df = df.iloc[8:]
            df.index.names = ["Duration"]
            cache[name] = df

    return cache

read_spreads(xls, cache={})

Reads financial and non-financial fundamental spreads from an Excel file and stores them in a dictionary.

Parameters:

- xls (ExcelFile, required): An ExcelFile object containing the spreadsheets.
- cache (dict, default {}): A dictionary to store the read spreadsheets. Defaults to an empty dictionary.

Returns:

- dict: A dictionary containing the read spreadsheets. Each sub-dictionary contains DataFrames with financial or non-financial spreads for the respective currencies. The dictionary includes the following keys:
  - "financial fundamental spreads": A dictionary containing financial spreadsheets for different currencies.
  - "non-financial fundamental spreads": A dictionary containing non-financial spreadsheets for different currencies.

Source code in solvency2_data/rfr.py
def read_spreads(xls: pd.ExcelFile, cache: dict = {}) -> dict:
    """
    Reads financial and non-financial fundamental spreads from an Excel file and stores them in a dictionary.

    Args:
        xls (pd.ExcelFile): An ExcelFile object containing the spreadsheets.
        cache (dict, optional): A dictionary to store the read spreadsheets.
            Defaults to an empty dictionary.

    Returns:
        dict: A dictionary containing the read spreadsheets.
            The dictionary includes the following keys:
                - "financial fundamental spreads": A dictionary containing financial spreadsheets
                    for different currencies.
                - "non-financial fundamental spreads": A dictionary containing non-financial spreadsheets
                    for different currencies.
            Each sub-dictionary contains DataFrames with financial or non-financial spreads
            for the respective currencies.
    """
    cache["financial fundamental spreads"] = {}
    for name in currencies:
        if name in xls.sheet_names:
            df = pd.read_excel(
                io=xls,
                sheet_name=name,
                header=1,
                usecols="W:AC",
                nrows=30,
                skiprows=8,
                names=[0, 1, 2, 3, 4, 5, 6],
            )
            df.index = range(1, 31)
            cache["financial fundamental spreads"][name] = df

    cache["non-financial fundamental spreads"] = {}
    for name in currencies:
        if name in xls.sheet_names:
            df = pd.read_excel(
                io=xls,
                sheet_name=name,
                header=1,
                usecols="W:AC",
                nrows=30,
                skiprows=48,
                names=[0, 1, 2, 3, 4, 5, 6],
            )
            df.index = range(1, 31)
            cache["non-financial fundamental spreads"][name] = df

    return cache
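
A sketch of calling read_spreads on an already downloaded PD_Cod file; the file name is hypothetical, and "EUR" is assumed to be among the configured currency sheets:

import pandas as pd
from solvency2_data.rfr import read_spreads

xls = pd.ExcelFile("EIOPA_RFR_20231231_PD_Cod.xlsx", engine="openpyxl")
spreads = read_spreads(xls)
xls.close()

# 30 durations (1-30) by 7 credit quality steps per currency sheet
print(spreads["financial fundamental spreads"]["EUR"].shape)  # (30, 7)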

Downloads RFR data and stores it in a sqlite database for future reference.

add_to_db(ref_date, db, data_type='rfr', workspace=None)

Adds data to the EIOPA database, to use when you are missing data.

Parameters:

- ref_date (str, required): The reference date in the format "%Y-%m-%d".
- db (EiopaDB, required): The EIOPA database instance.
- data_type (str, default 'rfr'): The type of data to add. Options: "rfr" (default), "meta", "spreads", "govies", "sym_adj".
- workspace (dict, default None): A dictionary containing workspace directories and paths. If None, it retrieves workspace information using get_workspace() function. Defaults to None.

Returns:

- None

Source code in solvency2_data/eiopa_data.py
def add_to_db(
    ref_date: str, db: EiopaDB, data_type: str = "rfr", workspace: dict = None
):
    """
    Adds data to the EIOPA database, to use when you are missing data.

    Args:
        ref_date (str): The reference date in the format "%Y-%m-%d".
        db (EiopaDB): The EIOPA database instance.
        data_type (str, optional): The type of data to add.
            Options: "rfr" (default), "meta", "spreads", "govies", "sym_adj".
        workspace (dict, optional): A dictionary containing workspace directories and paths.
            If None, it retrieves workspace information using get_workspace() function.
            Defaults to None.

    Returns:
        None
    """
    url = eiopa_link(ref_date, data_type=data_type)
    set_id = db.get_set_id(url)

    if data_type != "sym_adj":
        files = download_EIOPA_rates(url, ref_date)
        if data_type == "rfr":
            df = extract_spot_rates(files[data_type])
        elif data_type == "meta":
            df = extract_meta(files[data_type])
        elif data_type == "spreads":
            df = extract_spreads(files[data_type])
        elif data_type == "govies":
            df = extract_govies(files[data_type])
        else:
            raise KeyError
    elif data_type == "sym_adj":
        if workspace is None:
            workspace = get_workspace()
        raw_folder = workspace["raw_data"]
        file = download_file(url, raw_folder)
        df = extract_sym_adj(file, ref_date)

    if df is not None:
        df = df.reset_index()
        df["url_id"] = set_id
        df["ref_date"] = ref_date
        df.to_sql(data_type, con=db.conn, if_exists="append", index=False)
        set_types = {"govies": "rfr", "spreads": "rfr", "meta": "rfr"}
        db.update_catalog(
            url_id=set_id,
            dict_vals={
                "set_type": set_types.get(data_type, data_type),
                "primary_set": True,
                "ref_date": ref_date,
            },
        )
    return None
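
A hedged sketch of backfilling a single data set; EiopaDB is assumed to be importable from solvency2_data.sqlite_handler (documented below), and the database path is taken from get_workspace():

from solvency2_data.sqlite_handler import EiopaDB
from solvency2_data.eiopa_data import add_to_db, get_workspace

workspace = get_workspace()
db = EiopaDB(workspace["database"])              # opens (or creates) the sqlite database
add_to_db("2023-12-31", db, data_type="govies")  # download, extract and store one data set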

download_EIOPA_rates(url, ref_date, workspace=None)

Downloads EIOPA RFR (Risk-Free Rate) files from a given URL and extracts them.

Parameters:

- url (str, required): The URL from which to download the EIOPA RFR files.
- ref_date (str, required): The reference date in the format "%Y-%m-%d".
- workspace (dict, default None): A dictionary containing workspace directories and paths. If None, it retrieves workspace information using get_workspace() function. Defaults to None.

Returns:

- dict: A dictionary containing the paths to the downloaded files. The dictionary includes the following keys:
  - "rfr": The path to the downloaded EIOPA RFR term structures Excel file.
  - "meta": The path to the downloaded EIOPA RFR meta Excel file.
  - "spreads": The path to the downloaded EIOPA RFR PD Cod Excel file.
  - "govies": The path to the downloaded EIOPA RFR govvies Excel file.

Source code in solvency2_data/eiopa_data.py
def download_EIOPA_rates(url: str, ref_date: str, workspace: dict = None) -> dict:
    """
    Downloads EIOPA RFR (Risk-Free Rate) files from a given URL and extracts them.

    Args:
        url (str): The URL from which to download the EIOPA RFR files.
        ref_date (str): The reference date in the format "%Y-%m-%d".
        workspace (dict, optional): A dictionary containing workspace directories and paths.
            If None, it retrieves workspace information using get_workspace() function.
            Defaults to None.

    Returns:
        dict: A dictionary containing the paths to the downloaded files.
            The dictionary includes the following keys:
                - "rfr": The path to the downloaded EIOPA RFR term structures Excel file.
                - "meta": The path to the downloaded EIOPA RFR meta Excel file.
                - "spreads": The path to the downloaded EIOPA RFR PD Cod Excel file.
                - "govies": The path to the downloaded EIOPA RFR govvies Excel file.
    """
    if workspace is None:
        workspace = get_workspace()
    raw_folder = workspace["raw_data"]
    zip_file = download_file(url, raw_folder)

    # Change format of ref_date string for EIOPA Excel files from YYYY-mm-dd to YYYYmmdd:
    reference_date = ref_date.replace("-", "")

    name_excelfile = "EIOPA_RFR_" + reference_date + "_Term_Structures" + ".xlsx"
    name_excelfile_spreads = "EIOPA_RFR_" + reference_date + "_PD_Cod" + ".xlsx"
    # Make the file name matching case-insensitive via regex
    re_rfr = re.compile(f"(?i:{name_excelfile})")
    re_spreads = re.compile(f"(?i:{name_excelfile_spreads})")

    with zipfile.ZipFile(zip_file) as zipobj:
        for file in zipobj.namelist():
            res_rfr = re_rfr.search(file)
            res_spreads = re_spreads.search(file)
            if res_rfr:
                rfr_file = res_rfr.group(0)
                zipobj.extract(rfr_file, raw_folder)
            if res_spreads:
                spreads_file = res_spreads.group(0)
                zipobj.extract(spreads_file, raw_folder)
    return {
        "rfr": os.path.join(raw_folder, name_excelfile),
        "meta": os.path.join(raw_folder, name_excelfile),
        "spreads": os.path.join(raw_folder, name_excelfile_spreads),
        "govies": os.path.join(raw_folder, name_excelfile_spreads),
    }
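
A sketch combining eiopa_link (documented further below) with download_EIOPA_rates and extract_spot_rates, assuming the configured workspace folders exist; index labels such as "base" and "EUR" follow the remapping shown in extract_spot_rates:

from solvency2_data.scraping import eiopa_link
from solvency2_data.eiopa_data import download_EIOPA_rates, extract_spot_rates

url = eiopa_link("2023-12-31", data_type="rfr")
files = download_EIOPA_rates(url, "2023-12-31")  # same Excel path is returned under "rfr" and "meta"
spots = extract_spot_rates(files["rfr"])         # spot rates indexed by scenario, currency_code, duration
print(spots.loc[("base", "EUR")].head())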

download_file(url, raw_folder, filename='')

Downloads a file from a URL and saves it in a specified folder.

Parameters:

- url (str, required): The URL of the file to download.
- raw_folder (str, required): The path to the directory where the file will be saved.
- filename (str, default ''): The desired filename. If not provided, the filename will be derived from the URL. Defaults to "".

Returns:

- str: The path to the downloaded file.

Source code in solvency2_data/eiopa_data.py
def download_file(url: str, raw_folder: str, filename: str = "") -> str:
    """
    Downloads a file from a URL and saves it in a specified folder.

    Args:
        url (str): The URL of the file to download.
        raw_folder (str): The path to the directory where the file will be saved.
        filename (str, optional): The desired filename. If not provided,
            the filename will be derived from the URL. Defaults to "".

    Returns:
        str: The path to the downloaded file.
    """
    if filename:
        extension = url[(url.rfind(".")) :]
        if extension not in filename:
            filename = filename + extension
        else:
            pass
    else:
        # if filename not specified, then the file name will be the original file name
        filename = url[(url.rfind("/") + 1) :]
        # make sure that the filename does not contain illegal characters
        filename = re.sub(r"[^\w_. -]", "_", filename)

    if filename[-4:] != ".zip":
        filename = filename + ".zip"

    target_file = os.path.join(raw_folder, filename)

    if os.path.isfile(target_file):
        logging.info("file already exists in this location, not downloading")
    else:
        if not os.path.exists(raw_folder):
            os.makedirs(raw_folder)
        urllib.request.urlretrieve(url, target_file)  # simpler for file downloading
        logging.info(
            "file downloaded and saved in the following location: " + target_file
        )

    return target_file
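
A short sketch of download_file; the link is obtained with eiopa_link (documented below) and the target folder "raw_data" is a hypothetical local directory:

from solvency2_data.eiopa_data import download_file
from solvency2_data.scraping import eiopa_link

url = eiopa_link("2023-12-31", data_type="rfr")
zip_path = download_file(url, "raw_data")  # saved as raw_data/<name derived from the URL>.zip
print(zip_path)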

extract_govies(govies_filepath)

Extracts government spreads data from an EIOPA RFR spreads Excel file.

Parameters:

- govies_filepath (str, required): The path to the EIOPA RFR spreads Excel file containing government spreads.

Returns:

- pandas.DataFrame or None: A DataFrame containing extracted government spreads data, indexed by country code and duration. Returns None if no government spreads are found.

Source code in solvency2_data/eiopa_data.py
def extract_govies(govies_filepath):
    """
    Extracts government spreads data from an EIOPA RFR spreads Excel file.

    Args:
        govies_filepath (str): The path to the EIOPA RFR spreads Excel file containing government spreads.

    Returns:
        pandas.DataFrame or None: A DataFrame containing extracted government spreads data,
            indexed by country code and duration.
            Returns None if no government spreads are found.
    """
    logging.info("Extracting govies: " + govies_filepath)
    xls = pd.ExcelFile(govies_filepath, engine="openpyxl")
    cache = read_govies(xls)
    if cache["central government fundamental spreads"] is not None:
        spreads_gov = (
            cache["central government fundamental spreads"]
            .stack()
            .rename("spread")
            .to_frame()
        )
        spreads_gov.index.names = ["duration", "country_code"]
        spreads_gov.index = spreads_gov.index.reorder_levels([1, 0])
    else:
        logging.error("No govies found: " + govies_filepath)
        spreads_gov = None
    return spreads_gov

extract_meta(rfr_filepath)

Extracts metadata from an EIOPA RFR Excel file.

Parameters:

- rfr_filepath (str, required): The path to the EIOPA RFR Excel file.

Returns:

- dict: A dictionary containing extracted metadata. The dictionary includes metadata indexed by country.

Source code in solvency2_data/eiopa_data.py
def extract_meta(rfr_filepath: str) -> dict:
    """
    Extracts metadata from an EIOPA RFR Excel file.

    Args:
        rfr_filepath (str): The path to the EIOPA RFR Excel file.

    Returns:
        dict: A dictionary containing extracted metadata.
            The dictionary includes metadata indexed by country.
    """
    logging.info("Extracting meta data :" + rfr_filepath)
    meta = read_meta(rfr_filepath)
    meta = pd.concat(meta).T
    meta.columns = meta.columns.droplevel()
    meta.index.name = "Country"
    meta = meta.sort_index()
    return meta

extract_spot_rates(rfr_filepath)

Extracts spot rates from an EIOPA RFR Excel file.

Parameters:

- rfr_filepath (str, required): The path to the EIOPA RFR Excel file.

Returns:

- dict: A dictionary containing extracted spot rates. The dictionary includes spot rates indexed by scenario, currency code, and duration.

Source code in solvency2_data/eiopa_data.py
def extract_spot_rates(rfr_filepath: str) -> dict:
    """
    Extracts spot rates from an EIOPA RFR Excel file.

    Args:
        rfr_filepath (str): The path to the EIOPA RFR Excel file.

    Returns:
        dict: A dictionary containing extracted spot rates.
            The dictionary includes spot rates indexed by scenario, currency code, and duration.
    """
    logging.info("Extracting spots: " + rfr_filepath)
    # TODO: Complete this remap dictionary
    currency_codes_and_regions = {
        "EUR": "Euro",
        "PLN": "Poland",
        "CHF": "Switzerland",
        "USD": "United States",
        "GBP": "United Kingdom",
        "NOK": "Norway",
        "SEK": "Sweden",
        "DKK": "Denmark",
        "HRK": "Croatia",
    }
    currency_dict = dict((v, k) for k, v in currency_codes_and_regions.items())

    xls = pd.ExcelFile(rfr_filepath, engine="openpyxl")
    rates_tables = read_spot(xls)

    rates_tables = pd.concat(rates_tables)
    rates_tables = rates_tables.rename(columns=currency_dict)[currency_dict.values()]

    label_remap = {
        "RFR_spot_no_VA": "base",
        "RFR_spot_with_VA": "va",
        "Spot_NO_VA_shock_UP": "up",
        "Spot_NO_VA_shock_DOWN": "down",
        "Spot_WITH_VA_shock_UP": "va_up",
        "Spot_WITH_VA_shock_DOWN": "va_down",
    }
    rates_tables = rates_tables.rename(label_remap)

    rates_tables = rates_tables.stack().rename("spot")

    rates_tables.index.names = ["scenario", "duration", "currency_code"]
    rates_tables.index = rates_tables.index.reorder_levels([0, 2, 1])
    rates_tables = rates_tables.sort_index()
    return rates_tables

extract_spreads(spread_filepath)

Extracts spreads data from an EIOPA RFR spreads Excel file.

Parameters:

- spread_filepath (str, required): The path to the EIOPA RFR spreads Excel file.

Returns:

- pandas.DataFrame: A DataFrame containing extracted spreads data. The DataFrame includes spreads indexed by type, currency code, credit curve step, and duration.

Source code in solvency2_data/eiopa_data.py
def extract_spreads(spread_filepath):
    """
    Extracts spreads data from an EIOPA RFR spreads Excel file.

    Args:
        spread_filepath (str): The path to the EIOPA RFR spreads Excel file.

    Returns:
        pandas.DataFrame: A DataFrame containing extracted spreads data.
            The DataFrame includes spreads indexed by type, currency code, credit curve step, and duration.
    """
    logging.info("Extracting spreads: " + spread_filepath)
    xls = pd.ExcelFile(spread_filepath, engine="openpyxl")
    spreads = read_spreads(xls)
    spreads_non_gov = pd.concat(
        {
            i: pd.concat(spreads[i])
            for i in [
                "financial fundamental spreads",
                "non-financial fundamental spreads",
            ]
        }
    )
    spreads_non_gov = spreads_non_gov.stack().rename("spread")
    spreads_non_gov.index.names = ["type", "currency_code", "duration", "cc_step"]
    spreads_non_gov.index = spreads_non_gov.index.reorder_levels([0, 1, 3, 2])
    spreads_non_gov = spreads_non_gov.rename(
        {
            "financial fundamental spreads": "fin",
            "non-financial fundamental spreads": "non_fin",
        }
    )
    return spreads_non_gov

extract_sym_adj(sym_adj_filepath, ref_date)

Extracts symmetric adjustment data from a file.

Parameters:

- sym_adj_filepath (str, required): The path to the file containing symmetric adjustment data.
- ref_date (str, required): The reference date in the format "%Y-%m-%d".

Returns:

- pd.DataFrame or None: A DataFrame containing symmetric adjustment data. Returns None if there is a date mismatch between the reference date provided and the date in the file.

Source code in solvency2_data/eiopa_data.py
def extract_sym_adj(sym_adj_filepath: str, ref_date: str) -> pd.DataFrame:
    """
    Extracts symmetric adjustment data from a file.

    Args:
        sym_adj_filepath (str): The path to the file containing symmetric adjustment data.
        ref_date (str): The reference date in the format "%Y-%m-%d".

    Returns:
        pd.DataFrame or None: A DataFrame containing symmetric adjustment data.
            Returns None if there is a date mismatch between the reference date provided
            and the date in the file.
    """
    df = pd.read_excel(
        sym_adj_filepath,
        sheet_name="Symmetric_adjustment",
        usecols="E, K",
        nrows=1,
        skiprows=7,
        header=None,
        names=["ref_date", "sym_adj"],
    ).squeeze("columns")

    input_ref = ref_date
    ref_check = df.at[0, "ref_date"].strftime("%Y-%m-%d")

    if input_ref != ref_check:
        logging.warning("Date mismatch in sym_adj file: " + sym_adj_filepath)
        logging.warning(
            "Try opening this file and setting the date correctly then save and close, and rerun."
        )
        return None
    else:
        df = df.set_index("ref_date")
        return df

get(ref_date, data_type='rfr', workspace=None)

Retrieves data from the EIOPA database for a given reference date and data type.

Parameters:

- ref_date (str, required): The reference date in the format "%Y-%m-%d".
- data_type (str, default 'rfr'): The type of data to retrieve. Options: "rfr" (default), "meta", "spreads", "govies", "sym_adj".
- workspace (dict, default None): A dictionary containing workspace directories and paths. If None, it retrieves workspace information using get_workspace() function. Defaults to None.

Returns:

- pandas.DataFrame or None: A DataFrame containing retrieved data. Returns None if no data is found for the given reference date and data type.

Source code in solvency2_data/eiopa_data.py
def get(ref_date: str, data_type: str = "rfr", workspace: dict = None):
    """
    Retrieves data from the EIOPA database for a given reference date and data type.

    Args:
        ref_date (str): The reference date in the format "%Y-%m-%d".
        data_type (str, optional): The type of data to retrieve.
            Options: "rfr" (default), "meta", "spreads", "govies", "sym_adj".
        workspace (dict, optional): A dictionary containing workspace directories and paths.
            If None, it retrieves workspace information using get_workspace() function.
            Defaults to None.

    Returns:
        pandas.DataFrame or None: A DataFrame containing retrieved data.
            Returns None if no data is found for the given reference date and data type.
    """
    # Validate the provided ref_date:
    ref_date = validate_date_string(ref_date)
    # Check if DB exists, if not, create it:
    if workspace is None:
        workspace = get_workspace()
    database = workspace["database"]
    db = EiopaDB(database)

    sql_map = {
        "rfr": "SELECT * FROM rfr WHERE ref_date = '" + ref_date + "'",
        "meta": "SELECT * FROM meta WHERE ref_date = '" + ref_date + "'",
        "spreads": "SELECT * FROM spreads WHERE ref_date = '" + ref_date + "'",
        "govies": "SELECT * FROM govies WHERE ref_date = '" + ref_date + "'",
        "sym_adj": "SELECT * FROM sym_adj WHERE ref_date = '" + ref_date + "'",
    }
    sql = sql_map.get(data_type)
    df = pd.read_sql(sql, con=db.conn)
    if df.empty:
        add_to_db(ref_date, db, data_type)
        df = pd.read_sql(sql, con=db.conn)
    if not df.empty:
        df = df.drop(columns=["url_id", "ref_date"])
        return df
    else:
        return None
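
get() is the typical entry point for retrieving data. A minimal sketch, assuming the configured workspace folders exist; the first call per reference date and data type downloads the data and stores it in the sqlite database:

from solvency2_data.eiopa_data import get

rfr = get("2023-12-31", data_type="rfr")          # spot rates for that reference date
spreads = get("2023-12-31", data_type="spreads")  # fundamental spreads
sym_adj = get("2023-12-31", data_type="sym_adj")  # symmetric adjustment
print(rfr.head())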

get_workspace()

Retrieves workspace directories and paths from the configuration.

Returns:

- dict: A dictionary containing workspace directories and paths. The dictionary includes the following keys:
  - "database": The path to the EIOPA database file.
  - "raw_data": The path to the directory containing raw data.

Source code in solvency2_data/eiopa_data.py
def get_workspace() -> dict:
    """
    Retrieves workspace directories and paths from the configuration.

    Returns:
        dict: A dictionary containing workspace directories and paths.
            The dictionary includes the following keys:
                - "database": The path to the EIOPA database file.
                - "raw_data": The path to the directory containing raw data.
    """
    config = get_config().get("Directories")
    path_db = config.get("db_folder")
    database = os.path.join(path_db, "eiopa.db")
    path_raw = config.get("raw_data")
    return {"database": database, "raw_data": path_raw}

refresh()

Refreshes the EIOPA database by updating data for each month from January 2016 to the current month.

Returns:

- str: A message indicating that the database has been successfully rebuilt.

Source code in solvency2_data/eiopa_data.py
def refresh():
    """
    Refreshes the EIOPA database by updating data for each month from January 2016 to the current month.

    Returns:
        str: A message indicating that the database has been successfully rebuilt.
    """
    dr = pd.date_range(date(2016, 1, 31), date.today(), freq="M")
    # dr = pd.date_range(date(2021, 11, 30), date.today(), freq="M")
    for ref_date in dr:
        for data_type in ["rfr", "meta", "spreads", "govies", "sym_adj"]:
            _ = get(ref_date.date(), data_type)
    return "Database successfully rebuilt"

validate_date_string(ref_date)

Validates the input date string.

Parameters:

- ref_date (str or date, required): The input date string or datetime.date object.

Returns:

- str or None: A validated date string in the format "%Y-%m-%d". Returns None if the input date type is not recognized or cannot be converted.

Source code in solvency2_data/eiopa_data.py
def validate_date_string(ref_date):
    """
    Validates the input date string.

    Args:
        ref_date (str or datetime.date): The input date string or datetime.date object.

    Returns:
        str or None: A validated date string in the format "%Y-%m-%d".
            Returns None if the input date type is not recognized or cannot be converted.
    """
    if type(ref_date) == datetime.date:
        return ref_date.strftime("%Y-%m-%d")
    elif isinstance(ref_date, str):
        try:
            return datetime.datetime.strptime(ref_date, "%Y-%m-%d").strftime("%Y-%m-%d")
        except (TypeError, ValueError):
            logging.warning("Date type not recognised. Try datetime.date or YYYY-mm-dd")
            return None
    else:
        return None
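
A few illustrative calls:

import datetime
from solvency2_data.eiopa_data import validate_date_string

print(validate_date_string("2023-12-31"))                 # "2023-12-31"
print(validate_date_string(datetime.date(2023, 12, 31)))  # "2023-12-31"
print(validate_date_string("31-12-2023"))                 # None (format not recognised)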

Returns links for EIOPA files

check_if_download(url)

Checks if the URL points to a downloadable resource.

Parameters:

- url (str, required): The URL to check.

Returns:

- bool: True if the resource is downloadable, False otherwise.

Source code in solvency2_data/scraping.py
def check_if_download(url: str) -> bool:
    """
    Checks if the URL points to a downloadable resource.

    Args:
        url (str): The URL to check.

    Returns:
        bool: True if the resource is downloadable, False otherwise.
    """
    headers = requests.head(url).headers
    # downloadable = 'attachment' in headers.get('Content-Disposition', '')
    downloadable = headers.get("Content-Type") in [
        "application/zip",
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    ]
    return downloadable
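
A sketch of how this check can be combined with eiopa_link (documented below):

from solvency2_data.scraping import check_if_download, eiopa_link

url = eiopa_link("2023-12-31", data_type="rfr")
print(check_if_download(url))  # True for a direct zip/xlsx download link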

eiopa_link(ref_date, data_type='rfr')

Generates the link for downloading the selected type of data for a given date.

Parameters:

- ref_date (str, required): The reference date in the format 'YYYY-MM-DD'.
- data_type (str, default 'rfr'): The type of the dataset. Defaults to 'rfr'.

Returns:

- str: The valid link to the dataset.

Example:

from datetime import date
import pandas as pd
dr = pd.date_range(date(2016, 1, 31), date(2023, 5, 31), freq='M')
rfr_links = {i.strftime('%Y%m%d'): eiopa_link(i.strftime('%Y-%m-%d')) for i in dr}
sym_adj_links = {i.strftime('%Y%m%d'): eiopa_link(i.strftime('%Y-%m-%d'), 'sym_adj') for i in dr}

Source code in solvency2_data/scraping.py
def eiopa_link(ref_date: str, data_type: str = "rfr") -> str:
    """
    Generates the link for downloading the selected type of data for a given date.

    Args:
        ref_date (str): The reference date in the format 'YYYY-MM-DD'.
        data_type (str, optional): The type of the dataset. Defaults to 'rfr'.

    Returns:
        str: The valid link to the dataset.

    Example:
        from datetime import date
        import pandas as pd
        dr = pd.date_range(date(2016, 1, 31), date(2023, 5, 31), freq='M')
        rfr_links = {i.strftime('%Y%m%d'): eiopa_link(i.strftime('%Y-%m-%d')) for i in dr}
        sym_adj_links = {i.strftime('%Y%m%d'): eiopa_link(i.strftime('%Y-%m-%d'), 'sym_adj') for i in dr}
    """
    data_type_remap = {"spreads": "rfr", "govies": "rfr", "meta": "rfr"}
    data_type = data_type_remap.get(data_type, data_type)
    urls = urls_dict.get(data_type)
    # Change format of ref_date string for EIOPA Excel files from YYYY-mm-dd to YYYYmmdd:
    reference_date = ref_date.replace("-", "")
    ref_date_datetime = datetime.datetime.strptime(ref_date, "%Y-%m-%d")
    str_year = ref_date_datetime.strftime("%Y")
    str_month = ref_date_datetime.strftime("%B").lower()
    if data_type == "rfr":
        # eiopa uses two naming conventions for the files
        filename1 = (
            ".*(?i:filename=)(?:%E2%80%8B)?"
            + "(?i:"
            + str_month
            + ")"
            + "(?:[-, _]|%20)"
            + str_year
            + ".*"
            ".*"
        )
        filename2 = ".*EIOPA_RFR_" + reference_date + ".zip"
        r = re.compile(filename1 + "|" + filename2)

    elif data_type == "sym_adj":
        # Regex to find the file :
        # ._ required for ._march_2019
        # Only matches on first 3 letters of months since some mis-spellings
        words_in_link = ["symmetric", "adjustment", "equity", "capital", "charge"]
        r = re.compile(
            ".*(?i:eiopa)(?:[-, _]|%20)"
            + "(?:[-, _]|%20)".join(words_in_link)
            + "(?:[-, _]|%20)"
            + "(?i:"
            + str_month[:3]
            + ")"
            + "[a-z]{0,"
            + str(len(str_month) - 3)
            + "}(?:[-, _]|%20)"
            + str_year
            + "(?:_[0-9]{0,1})?(?:.xlsx|$)"
        )

    valid_link = get_links(urls, r)

    problem_dates = {"rfr": ["2021-11-30"]}
    if ref_date in problem_dates.get(data_type, []):
        valid_link = False

    if not valid_link:
        manual_links = {
            "sym_adj": {
                "2020-06-30": "https://www.eiopa.europa.eu/system/files/2020-06/eiopa_symmetric_adjustment_equity_capital_charge_16_06_2020.xlsx",
                "2020-07-31": "https://www.eiopa.europa.eu/system/files/2020-07/eiopa_symmetric_adjustment_equity_capital_charge_14_07_2020.xlsx",
            },
            "rfr": {
                "2021-11-30": "https://www.eiopa.europa.eu/system/files/2021-12/eiopa_rfr_20211130.zip"
            },
        }
        valid_link = manual_links.get(data_type).get(ref_date)

    if not valid_link:
        raise FileNotFoundError(
            "Error: no EIOPA file found for date: "
            + ref_date
            + "; Source: "
            + data_type
        )

    return valid_link

get_links(urls, r)

Retrieves valid download links from a list of URLs.

Parameters:

- urls (str, required): A list of URLs to search for links.
- r (str, required): The pattern to match for links.

Returns:

- list: A list of valid download links.

Source code in solvency2_data/scraping.py
def get_links(urls: str, r: str) -> list:
    """
    Retrieves valid download links from a list of URLs.

    Args:
        urls (str): A list of URLs to search for links.
        r (str): The pattern to match for links.

    Returns:
        list: A list of valid download links.
    """
    raw_links = []
    for page in urls:
        if len(raw_links) == 0:
            resp = requests.get(page)
            soup = bs.BeautifulSoup(resp.text, "lxml")
            for link in soup.find_all("a", {"href": r}):
                if link.get("href")[0] == "/":
                    # correct relative urls
                    link_before_redirect = "https://www.eiopa.europa.eu" + link.get(
                        "href"
                    )
                else:
                    link_before_redirect = link.get("href")
                # Check if there is a redirect:
                raw_links.append(link_before_redirect)

    valid_links = []
    for url in raw_links:
        if check_if_download(url):
            valid_links.append(url)
        else:
            redirect = lookthrough_redirect(url)
            if check_if_download(redirect):
                valid_links.append(redirect)
    if len(valid_links) > 0:
        return valid_links[0]
    else:
        return None

lookthrough_redirect(url)

Looks through redirects and returns the final URL.

Parameters:

- url (str, required): The URL to look through.

Returns:

- str: The final URL after following redirects.

Source code in solvency2_data/scraping.py
def lookthrough_redirect(url: str) -> str:
    """
    Looks through redirects and returns the final URL.

    Args:
        url (str): The URL to look through.

    Returns:
        str: The final URL after following redirects.
    """
    try:
        resp = urllib.request.urlopen(url)
        file_url = resp.geturl()
    except ImportError:
        file_url = url
    return file_url

This module contains all the handler functions for the sqlite database storing the data

EiopaDB

Bases: object

Database object to store the EIOPA data.

Attributes:

- database (str): Path to the database file.

Methods:

- __init__: Initialize database object.
- reset: Hard reset of the database.
- set_conn: Set database connection.
- _close_conn: Close database connection.
- get_set_id: Get the URL ID for a URL.
- _add_set: Add a new URL to the catalog.
- update_catalog: Update the catalog with new values.

Source code in solvency2_data/sqlite_handler.py
class EiopaDB(object):
    """
    Database object to store the EIOPA data.

    Attributes:
        database (str): Path to the database file.

    Methods:
        __init__(database): Initialize database object.
        reset(): Hard reset of the database.
        set_conn(): Set database connection.
        _close_conn(): Close database connection.
        get_set_id(url): Get the URL ID for a URL.
        _add_set(url): Add a new URL to the catalog.
        update_catalog(url_id, dict_vals): Update the catalog with new values.
    """

    def __init__(self, database):
        """
        Initialize the database.

        Args:
            database (str): Path to the database file.

        Returns:
            None
        """
        self.database = database
        if not os.path.isfile(database):
            root_folder = os.path.dirname(database)
            if not os.path.exists(root_folder):
                os.makedirs(root_folder)
            create_eiopa_db(database)
        self.set_conn()
        logging.info("DB initialised")

    def reset(self):
        """
        Hard reset of the database

        Args:
            None

        Returns:
            None

        """
        if os.path.exists(self.database):
            self._close_conn()
            os.remove(self.database)
        create_eiopa_db(self.database)
        self.set_conn()

    def set_conn(self):
        """
        Set database connection

        Args:
            None

        Returns:
            None

        """
        self.conn = create_connection(self.database)

    def _close_conn(self):
        """
        Close database connection

        Args:
            None

        Returns:
            None

        """
        if self.conn is not None:
            self.conn.close()

    def get_set_id(self, url):
        """
        Get the url id for a url
        If not there, check if valid then add

        Args:
            url: url to be found

        Returns:
            None

        """
        cur = self.conn.cursor()
        try:
            set_id = cur.execute(
                "SELECT url_id FROM catalog WHERE url = '" + url + "'"
            ).fetchone()
        except Error:
            pass
        if set_id is not None:
            set_id = set_id[0]  # Cursor returns a tuple and only want id
        else:
            set_id = self._add_set(url)
        return set_id

    def _add_set(self, url):
        """Private method, only called when url not already in catalog"""
        sql = "INSERT INTO catalog (url) VALUES ('" + url + "')"
        cur = self.conn.cursor()
        cur.execute(sql)
        self.conn.commit()
        return cur.lastrowid

    def update_catalog(self, url_id: int, dict_vals: dict):
        set_lines = ", ".join([f"{k}='{v}'" for k, v in dict_vals.items()])
        sql = "UPDATE catalog SET %s WHERE url_id=%s" % (set_lines, url_id)
        cur = self.conn.cursor()
        cur.execute(sql)
        self.conn.commit()
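
A minimal sketch of using the class directly; the database path and URL are hypothetical, and normally get() and add_to_db() manage the catalog for you:

from solvency2_data.sqlite_handler import EiopaDB

db = EiopaDB("eiopa.db")  # creates the file and schema if missing
set_id = db.get_set_id("https://www.eiopa.europa.eu/example_file.zip")  # hypothetical URL
db.update_catalog(
    url_id=set_id,
    dict_vals={"set_type": "rfr", "primary_set": True, "ref_date": "2023-12-31"},
)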

__init__(database)

Initialize the database.

Parameters:

- database (str, required): Path to the database file.

Returns:

- None

Source code in solvency2_data/sqlite_handler.py
def __init__(self, database):
    """
    Initialize the database.

    Args:
        database (str): Path to the database file.

    Returns:
        None
    """
    self.database = database
    if not os.path.isfile(database):
        root_folder = os.path.dirname(database)
        if not os.path.exists(root_folder):
            os.makedirs(root_folder)
        create_eiopa_db(database)
    self.set_conn()
    logging.info("DB initialised")

get_set_id(url)

Gets the url id for a url. If the url is not yet in the catalog, it is added and its new id is returned.

Parameters:

Name Type Description Default
url

url to be found

required

Returns:

Type Description

The url id (int) of the given url.

Source code in solvency2_data/sqlite_handler.py
def get_set_id(self, url):
    """
    Get the url id for a url
    If not there, check if valid then add

    Args:
        url: url to be found

    Returns:
        None

    """
    cur = self.conn.cursor()
    try:
        set_id = cur.execute(
            "SELECT url_id FROM catalog WHERE url = '" + url + "'"
        ).fetchone()
    except Error:
        pass
    if set_id is not None:
        set_id = set_id[0]  # Cursor returns a tuple and only want id
    else:
        set_id = self._add_set(url)
    return set_id

reset()

Hard reset of the database

Returns:

Type Description

None

Source code in solvency2_data/sqlite_handler.py
def reset(self):
    """
    Hard reset of the database

    Args:
        None

    Returns:
        None

    """
    if os.path.exists(self.database):
        self._close_conn()
        os.remove(self.database)
    create_eiopa_db(self.database)
    self.set_conn()

set_conn()

Set database connection

Returns:

Type Description

None

Source code in solvency2_data/sqlite_handler.py
def set_conn(self):
    """
    Set database connection

    Args:
        None

    Returns:
        None

    """
    self.conn = create_connection(self.database)

create_connection(database)

Create a database connection to the SQLite database.

Parameters:

Name Type Description Default
database str

database specified by database file path

required

Returns:

Type Description

connection object or None

Source code in solvency2_data/sqlite_handler.py
def create_connection(database: str):
    """
    Create a database connection to the SQLite database.

    Args:
        database: database specified by database file path

    Returns:
        connection object or None

    """
    conn = None
    try:
        conn = sqlite3.connect(database)
        return conn
    except Error as e:
        logging.error(e)

    return conn
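
A short sketch of using the connection helper directly (the file name is illustrative); sqlite3 creates the file if it does not exist yet, and the None guard covers connection failures.

from solvency2_data.sqlite_handler import create_connection

conn = create_connection("eiopa.db")
if conn is not None:
    print(conn.execute("SELECT sqlite_version()").fetchone())
    conn.close()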

create_eiopa_db(database='eiopa.db')

Create the EIOPA database

Parameters:

Name Type Description Default
database str

name of the database to be created

'eiopa.db'

Returns:

Type Description
None

None

Source code in solvency2_data/sqlite_handler.py
def create_eiopa_db(database: str = r"eiopa.db") -> None:
    """
    Create the EIOPA database

    Args:
        database: name of the database to be created

    Returns:
        None
    """
    table_def = {
        "catalog": """ CREATE TABLE IF NOT EXISTS catalog (
                                     url_id INTEGER NOT NULL PRIMARY KEY,
                                     url TEXT,
                                     set_type TEXT,
                                     primary_set BOOLEAN,
                                     ref_date TEXT
                                     ); """,
        "meta": """ CREATE TABLE IF NOT EXISTS meta (
                                     url_id INTEGER NOT NULL,
                                     ref_date TEXT,
                                     Country TEXT,
                                     Info TEXT,
                                     Coupon_freq INTEGER,
                                     LLP INTEGER,
                                     Convergence INTEGER,
                                     UFR REAL,
                                     alpha REAL,
                                     CRA REAL,
                                     VA REAL,
                                     FOREIGN KEY (url_id) REFERENCES catalog (url_id)
                                        ON DELETE CASCADE ON UPDATE NO ACTION
                                     ); """,
        "rfr": """ CREATE TABLE IF NOT EXISTS rfr (
                                     url_id INTEGER NOT NULL,
                                     ref_date TEXT,
                                     scenario TEXT,
                                     currency_code TEXT,
                                     duration INTEGER,
                                     spot REAL,
                                     FOREIGN KEY (url_id) REFERENCES catalog (url_id)
                                        ON DELETE CASCADE ON UPDATE NO ACTION
                                     ); """,
        "spreads": """CREATE TABLE IF NOT EXISTS spreads (
                                        url_id INTEGER NOT NULL,
                                        ref_date TEXT,
                                        type TEXT,
                                        currency_code TEXT,
                                        duration INTEGER,
                                        cc_step INTEGER,
                                        spread REAL,
                                        FOREIGN KEY (url_id) REFERENCES catalog (url_id)
                                        ON DELETE CASCADE ON UPDATE NO ACTION
                                        );""",
        "govies": """CREATE TABLE IF NOT EXISTS govies (
                                            url_id INTEGER NOT NULL,
                                            ref_date TEXT,
                                            country_code TEXT,
                                            duration INTEGER,
                                            spread REAL,
                                            FOREIGN KEY (url_id) REFERENCES catalog (url_id)
                                        ON DELETE CASCADE ON UPDATE NO ACTION
                                            );""",
        "sym_adj": """CREATE TABLE IF NOT EXISTS sym_adj (
                                    url_id INTEGER NOT NULL,
                                    ref_date TEXT,
                                    sym_adj REAL,
                                    FOREIGN KEY (url_id) REFERENCES catalog (url_id)
                                ON DELETE CASCADE ON UPDATE NO ACTION
                                    );""",
    }
    # create a database connection
    conn = create_connection(database)
    if conn is not None:
        # create tables
        for key, val in table_def.items():
            exec_sql(conn, val)
    else:
        logging.error("Error! cannot create the database connection.")

exec_sql(conn, sql)

Execute sql in connection

Parameters:

Name Type Description Default
conn

database connection

required
sql str

sql statement to be executed

required

Returns:

Type Description

None

Source code in solvency2_data/sqlite_handler.py
def exec_sql(conn, sql: str):
    """
    Execute sql in connection

    Args:
        conn: database connection
        sql: sql statement to be executed

    Returns:
        None

    """

    try:
        c = conn.cursor()
        c.execute(sql)
    except Error as e:
        logging.error(e)
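
exec_sql can also be used for ad-hoc statements against an existing connection; a small sketch (the scratch table is illustrative and not part of the EIOPA schema).

from solvency2_data.sqlite_handler import create_connection, exec_sql

conn = create_connection("eiopa.db")
exec_sql(conn, "CREATE TABLE IF NOT EXISTS scratch (note TEXT);")
conn.close()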

smith_wilson(instrument='Zero', liquid_maturities=[], RatesIn={}, nrofcoup=1, cra=0, ufr=0, min_alfa=0.05, tau=1, T2=60, precision=6, method='brute_force', output_type='zero rates annual compounding')

Calculates Smith-Wilson parameters and returns output based on the specified parameters.

Note: Prices of all instruments are set to 1 by construction. Hence: if Instrument = Zero, then Zero i has only one pay-off of (1+r(i))^u(i) at time u(i); if Instrument = Swap or Bond, then Swap/Bond i has pay-offs of r(i)/nrofcoup at times 1/nrofcoup, 2/nrofcoup, ..., u(i)-1/nrofcoup, plus a final pay-off of 1+r(i)/nrofcoup at time u(i).

Parameters:

Name Type Description Default
instrument str

Type of financial instrument. Default is "Zero".

'Zero'
liquid_maturities list

Liquid maturities.

[]
RatesIn dict

Input dictionary for RatesIn.

{}
nrofcoup int

Number of Coupon Payments per Year. Default is 1.

1
cra float

Credit Risk Adjustment in basis points. Default is 0.

0
ufr float

Ultimate Forward Rate, annually compounded, expressed as a fraction (perunage). Default is 0.

0
min_alfa float

Minimum value for alfa. Default is 0.05.

0.05
tau float

Tau value. Default is 1.

1
T2 int

Convergence Maturity. Default is 60.

60
precision int

Precision value. Default is 6.

6
method str

Calculation method. Default is "brute_force".

'brute_force'
output_type str

Type of output. Default is "zero rates annual compounding".

'zero rates annual compounding'

Returns:

Name Type Description
output

Calculated output based on the specified parameters.

Example

instrument = "Zero" liquid_maturities = [1, 2, 3, 4, 5] RatesIn = {0: 0.02, 1: 0.025, 2: 0.03, 3: 0.035, 4: 0.04, 5: 0.045} nrofcoup = 2 cra = 0.01 ufr = 0.05 min_alfa = 0.05 tau = 1 T2 = 60 precision = 6 method = "brute_force" output_type = "zero rates annual compounding" smith_wilson(instrument, liquid_maturities, RatesIn, nrofcoup, cra, ufr, min_alfa, tau, T2, precision, method, output_type) array([0. , 0.01984642, 0.04040711, 0.06171705, 0.08381221, 0.10672926, 0.13050535, 0.15517806, 0.18078504, 0.20736481, 0.23495648, 0.26360038, 0.29333873, 0.3242144 , 0.35627157, 0.3895553 , 0.42411294, 0.4599934 , 0.49724737, 0.53592752, 0.57608986, 0.61779295, 0.66109833, 0.70607181, 0.75278461, 0.80131366, 0.85174183, 0.90415729, 0.95865366, 1.01533039, 1.0742924 , 1.13564943, 1.19951555, 1.26601055, 1.33526045, 1.40739786, 1.48256135, 1.5608958 , 1.6425528 , 1.72769014, 1.81647324, 1.90907556, 2.00567914, 2.10647707, 2.21167306, 2.32148215, 2.43613139, 2.55586064, 2.68092039, 2.81157657, 2.94810739, 3.09080731, 3.23998602, 3.39596949, 3.559099 , 3.72973118, 3.90823814, 4.09500861, 4.29045285, 4.49400082, 4.70510529, 4.92324198, 5.14791281, 5.37864303, 5.61498852, 5.85652901, 6.10287445, 6.35366537, 6.6085712 , 6.86728774, 7.12954064, 7.395082 , 7.66368905, 7.93516083, 8.20931788, 8.48599994, 8.76506649, 9.04639664, 9.329887 , 9.61545067, 9.90301596, 10.19252555, 10.48393262, 10.77720086, 11.07230382, 11.36922416, 11.66795383, 11.96849432, 12.27085209, 12.57503994, 12.88007745, 13.18599135, 13.49281377, 13.80058263, 14.109341 , 14.41913647, 14.73002158, 15.04205535, 15.35530365, 15.66983964, 15.98574325, 16.30310069, 16.62199494, 16.94251238, 17.2647438 , 17.5887832 , 17.91472858, 18.24268076, 18.5727421 , 18.90502131, 19.23963426, 19.57670285, 19.9163558 , 20.25872748, 20.60395864, 20.95219615, 21.30359381, 21.65831201, 22.01651854, 22.3783874 ])

Source code in solvency2_data/smith_wilson.py
def smith_wilson(
    instrument: str = "Zero",
    liquid_maturities: list = [],
    RatesIn: dict = {},
    nrofcoup: int = 1,
    cra: float = 0,
    ufr: float = 0,
    min_alfa: float = 0.05,
    tau: float = 1,
    T2: int = 60,
    precision: int = 6,
    method: str = "brute_force",
    output_type: str = "zero rates annual compounding",
):
    """
    Calculates Smith-Wilson parameters and returns output based on the specified parameters.

    Note: Prices of all instruments are set to 1 by construction.
    Hence 1: if Instrument = Zero then for Zero i there is only one pay-off
    of (1+r(i))^u(i) at time u(i).
    Hence 2: if Instrument = Swap or Bond then for Swap/Bond i there are
    pay-offs of r(i)/nrofcoup at time 1/nrofcoup, 2/nrofcoup, ...
    u(i)-1/nrofcoup plus a final pay-off of 1+r(i)/nrofcoup at time u(i).

    Args:
        instrument (str): Type of financial instrument. Default is "Zero".
        liquid_maturities (list): Liquid maturities.
        RatesIn (dict): Input dictionary for RatesIn.
        nrofcoup (int): Number of Coupon Payments per Year. Default is 1.
        cra (float): Credit Risk Adjustment in basis points. Default is 0.
        ufr (float): Ultimate Forward Rate, annually compounded, expressed as a fraction (perunage). Default is 0.
        min_alfa (float): Minimum value for alfa. Default is 0.05.
        tau (float): Tau value. Default is 1.
        T2 (int): Convergence Maturity. Default is 60.
        precision (int): Precision value. Default is 6.
        method (str): Calculation method. Default is "brute_force".
        output_type (str): Type of output. Default is "zero rates annual compounding".

    Returns:
        output: Calculated output based on the specified parameters.

    Example:
        >>> instrument = "Zero"
        >>> liquid_maturities = [1, 2, 3, 4, 5]
        >>> RatesIn = {0: 0.02, 1: 0.025, 2: 0.03, 3: 0.035, 4: 0.04, 5: 0.045}
        >>> nrofcoup = 2
        >>> cra = 0.01
        >>> ufr = 0.05
        >>> min_alfa = 0.05
        >>> tau = 1
        >>> T2 = 60
        >>> precision = 6
        >>> method = "brute_force"
        >>> output_type = "zero rates annual compounding"
        >>> smith_wilson(instrument, liquid_maturities, RatesIn, nrofcoup, cra, ufr, min_alfa, tau, T2, precision, method, output_type)
        array([0.        , 0.01984642, 0.04040711, 0.06171705, 0.08381221,
               0.10672926, 0.13050535, 0.15517806, 0.18078504, 0.20736481,
               0.23495648, 0.26360038, 0.29333873, 0.3242144 , 0.35627157,
               0.3895553 , 0.42411294, 0.4599934 , 0.49724737, 0.53592752,
               0.57608986, 0.61779295, 0.66109833, 0.70607181, 0.75278461,
               0.80131366, 0.85174183, 0.90415729, 0.95865366, 1.01533039,
               1.0742924 , 1.13564943, 1.19951555, 1.26601055, 1.33526045,
               1.40739786, 1.48256135, 1.5608958 , 1.6425528 , 1.72769014,
               1.81647324, 1.90907556, 2.00567914, 2.10647707, 2.21167306,
               2.32148215, 2.43613139, 2.55586064, 2.68092039, 2.81157657,
               2.94810739, 3.09080731, 3.23998602, 3.39596949, 3.559099  ,
               3.72973118, 3.90823814, 4.09500861, 4.29045285, 4.49400082,
               4.70510529, 4.92324198, 5.14791281, 5.37864303, 5.61498852,
               5.85652901, 6.10287445, 6.35366537, 6.6085712 , 6.86728774,
               7.12954064, 7.395082  , 7.66368905, 7.93516083, 8.20931788,
               8.48599994, 8.76506649, 9.04639664, 9.329887  , 9.61545067,
               9.90301596, 10.19252555, 10.48393262, 10.77720086, 11.07230382,
               11.36922416, 11.66795383, 11.96849432, 12.27085209, 12.57503994,
               12.88007745, 13.18599135, 13.49281377, 13.80058263, 14.109341  ,
               14.41913647, 14.73002158, 15.04205535, 15.35530365, 15.66983964,
               15.98574325, 16.30310069, 16.62199494, 16.94251238, 17.2647438 ,
               17.5887832 , 17.91472858, 18.24268076, 18.5727421 , 18.90502131,
               19.23963426, 19.57670285, 19.9163558 , 20.25872748, 20.60395864,
               20.95219615, 21.30359381, 21.65831201, 22.01651854, 22.3783874 ])
    """
    assert (
        instrument == "Zero" and nrofcoup == 1
    ), "instrument is zero bond, but with nrofcoup unequal to 1."
    assert method == "brute_force", "Only brute force method is implemented."
    assert instrument == "Zero", "No other instruments implemented yet."

    # the number of liquid rates
    n = len(liquid_maturities)
    # nrofcoup * maximum liquid maturity
    m = nrofcoup * max(liquid_maturities)

    log_ufr = np.log(1 + ufr)
    tau = tau / 10000
    cra = cra / 10000

    # Q' matrix according to 146 of specs;
    q = q_matrix(instrument, n, m, liquid_maturities, RatesIn, nrofcoup, cra, log_ufr)

    # Determine optimal alfa with corresponding gamma
    if method == "brute_force":
        alfa, gamma = optimal_alfa(min_alfa, q, nrofcoup, T2, tau, precision)
    alfa = np.round(alfa, 6)

    # Now the SW-present value function according to 154 of the specs can be
    # calculated: p(v)=exp(-lnUFR*v)*(1+H(v,u)*Qb)
    # The G(v,u) matrix for maturities v = 1 to 121 according to 142 of the
    # technical specs (Note: maturity 121 will not be used; see later)
    g = np.fromfunction(
        lambda i, j: np.where(
            j / nrofcoup > i,
            alfa * (1 - np.exp(-alfa * j / nrofcoup) * np.cosh(alfa * i)),
            alfa * np.exp(-alfa * i) * np.sinh(alfa * j / nrofcoup),
        ),
        (121, m),
    )

    # The H(v,u) matrix for maturities v = 1 to 121 according to 139
    # of the technical specs
    # h[i, j] = big_h(alfa * i, alfa * (j+1) / nrofcoup) -> strange,
    # is different from earlier def
    h = np.fromfunction(
        lambda i, j: big_h(alfa * i / nrofcoup, alfa * (j + 1) / nrofcoup), (122, m)
    )

    # First a temporary discount-vector will be used to store the in-between
    # result H(v,u)*Qb = #(v,u)*gamma (see 154 of the specs)
    temptempdiscount = np.matmul(h, gamma)
    # Calculating G(v,u)*Qb according to 158 of the specs
    temptempintensity = np.matmul(g, gamma)

    tempdiscount = np.zeros(121)
    tempintensity = np.zeros(121)
    for i in range(0, 121):
        tempdiscount[i] = temptempdiscount[i][0]
        tempintensity[i] = temptempintensity[i][0]

    # calculating (1'-exp(-alfa*u'))Qb as subresult for 160 of specs
    res1 = sum(
        (1 - np.exp(-alfa * (i + 1) / nrofcoup)) * gamma[i, 0] for i in range(0, m)
    )

    # yield intensities
    yldintensity = np.zeros(121)
    yldintensity[0] = log_ufr - alfa * res1  # calculating 160 of the specs
    # calculating 158 of the specs for maturity 1 year
    yldintensity[1:] = log_ufr - np.log(1 + tempdiscount[1:]) / np.arange(1, 121)

    # forward intensities # calculating 158 of the specs for higher maturities
    fwintensity = np.zeros(121)
    fwintensity[0] = log_ufr - alfa * res1  # calculating 160 of the specs
    fwintensity[1:] = log_ufr - tempintensity[1:] / (1 + tempdiscount[1:])

    # discount rates
    discount = np.zeros(121)
    discount[0] = 1
    discount[1:] = np.exp(-log_ufr * np.arange(1, 121)) * (1 + tempdiscount[1:])

    # forward rates annual compounding
    forwardac = np.zeros(121)
    forwardac[0] = 0
    forwardac[1:] = discount[:-1] / discount[1:] - 1

    # zero rates annual compounding
    zeroac = np.zeros(121)
    zeroac[0] = 0
    zeroac[1:] = np.power(discount[1:], -1 / np.arange(1, 121)) - 1

    if output_type == "zero rates annual compounding":
        output = zeroac
    elif output_type == "forward rate annual compounding":
        output = forwardac
    elif output_type == "discount rates":
        output = discount
    elif output_type == "forward intensities":
        output = fwintensity
    elif output_type == "yield intensities":
        output = yldintensity
    elif output_type == "alfa":
        output = alfa

    return output
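
A hedged sketch of an alternative call: the inputs mirror the docstring example but use nrofcoup=1 (as required by the asserts above) and request discount factors instead of zero rates; the parameter values are illustrative, not official EIOPA settings.

from solvency2_data.smith_wilson import smith_wilson

rates = {0: 0.02, 1: 0.025, 2: 0.03, 3: 0.035, 4: 0.04, 5: 0.045}
discounts = smith_wilson(
    instrument="Zero",
    liquid_maturities=[1, 2, 3, 4, 5],
    RatesIn=rates,
    nrofcoup=1,                      # Zero instruments require nrofcoup == 1
    cra=10,                          # credit risk adjustment in basis points (illustrative)
    ufr=0.036,                       # illustrative UFR as a fraction
    output_type="discount rates",
)
print(discounts[:10])                # discount factors for maturities 0 to 9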

DiscountedValue4par2forwards(sum_df=0, last_df=0, par_rate=0, forward_rate=0, t_min_k=0)

Calculates the discounted value of a par-rate instrument under a constant forward-rate extension, together with its derivative with respect to the forward rate (used in the Newton-Raphson iteration in FromParToForwards).

Parameters:

Name Type Description Default
sum_df float

The sum of discount factors. Defaults to 0.

0
last_df float

The last discount factor. Defaults to 0.

0
par_rate float

The par rate. Defaults to 0.

0
forward_rate float

The forward rate. Defaults to 0.

0
t_min_k int

The number of periods between the previous and the current maturity. Defaults to 0.

0

Returns:

Type Description
tuple

The discounted value and its derivative with respect to the forward rate.

Source code in solvency2_data/alternative_extrapolation.py
def DiscountedValue4par2forwards(
    sum_df: float = 0,
    last_df: float = 0,
    par_rate: float = 0,
    forward_rate: float = 0,
    t_min_k: int = 0,
) -> tuple:
    """
    Calculates the discounted value of a par-rate instrument under a constant
    forward-rate extension, together with its derivative with respect to the
    forward rate (used in the Newton-Raphson iteration in FromParToForwards).

    Args:
        sum_df (float, optional): The sum of discount factors. Defaults to 0.
        last_df (float, optional): The last discount factor. Defaults to 0.
        par_rate (float, optional): The par rate. Defaults to 0.
        forward_rate (float, optional): The forward rate. Defaults to 0.
        t_min_k (int, optional): The number of periods between the previous and the current maturity. Defaults to 0.

    Returns:
        tuple: The discounted value and its derivative with respect to the forward rate.
    """
    disc_val_1 = sum_df * par_rate
    disc_val_2 = 0
    for i in range(1, t_min_k + 1):
        disc_val_1 += par_rate * last_df / ((1 + forward_rate) ** i)
        disc_val_2 -= i * par_rate * last_df / ((1 + forward_rate) ** (i + 1))
    disc_val_1 += last_df / ((1 + forward_rate) ** t_min_k) - 1
    disc_val_2 -= t_min_k * last_df / ((1 + forward_rate) ** (t_min_k + 1))
    return disc_val_1, disc_val_2
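
The two returned values drive the Newton-Raphson iteration in FromParToForwards below: the first is the pricing error of the par instrument, the second its derivative with respect to the forward rate. A hedged, stand-alone illustration of a single Newton step (the par rate is illustrative):

from solvency2_data.alternative_extrapolation import DiscountedValue4par2forwards

f = 0.0                                      # initial guess for the forward rate
val, deriv = DiscountedValue4par2forwards(
    sum_df=0, last_df=1, par_rate=0.015, forward_rate=f, t_min_k=1
)
f = f - val / deriv                          # one Newton-Raphson update towards 0.015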

FromParToForwards(term_struct=None, span=120, max_runs=MAX_RUNS, max_error=MAX_ERROR)

Converts a par rate term structure to forward rates.

Parameters:

Name Type Description Default
term_struct Series

The par rate term structure. Defaults to None.

None
span int

The span of the forward rates. Defaults to 120.

120
max_runs int

The maximum number of iterations for convergence. Defaults to MAX_RUNS.

MAX_RUNS
max_error float

The maximum error for convergence. Defaults to MAX_ERROR.

MAX_ERROR

Returns:

Type Description

pd.Series: The forward rates term structure.

Source code in solvency2_data/alternative_extrapolation.py
def FromParToForwards(
    term_struct: pd.Series(dtype="float64") = None,
    span: int = 120,
    max_runs: int = MAX_RUNS,
    max_error: float = MAX_ERROR,
):
    """
    Converts a par rate term structure to forward rates.

    Args:
        term_struct (pd.Series, optional): The par rate term structure. Defaults to None.
        span (int, optional): The span of the forward rates. Defaults to 120.
        max_runs (int, optional): The maximum number of iterations for convergence. Defaults to MAX_RUNS.
        max_error (float, optional): The maximum error for convergence. Defaults to MAX_ERROR.

    Returns:
        pd.Series: The forward rates term structure.
    """
    forwards_struct = np.zeros(span)

    sum_df = 0
    df = 1
    previous_maturity = 0
    for maturity in term_struct.keys():
        f = 0
        t_min_k = maturity - previous_maturity
        disc_val_1, disc_val_2 = DiscountedValue4par2forwards(
            sum_df, df, term_struct[maturity], f, t_min_k
        )
        k = 0
        while np.abs(disc_val_1) >= max_error and k <= max_runs:
            f = f - disc_val_1 / disc_val_2
            disc_val_1, disc_val_2 = DiscountedValue4par2forwards(
                sum_df, df, term_struct[maturity], f, t_min_k
            )
            k = k + 1
        for i in range(previous_maturity + 1, maturity + 1):
            forwards_struct[i - 1] = f
            df /= 1 + forwards_struct[i - 1]
            sum_df += df
        previous_maturity = maturity

    for i in range(term_struct.keys()[-1], span + 1):
        forwards_struct[i - 1] = forwards_struct[i - 2]

    return pd.Series(data=forwards_struct, index=range(1, span + 1), dtype="float64")
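
A hedged usage sketch with an illustrative par curve (MAX_RUNS and MAX_ERROR are module-level constants of alternative_extrapolation):

import pandas as pd
from solvency2_data.alternative_extrapolation import FromParToForwards

par_curve = pd.Series({1: 0.010, 2: 0.012, 5: 0.015, 10: 0.018, 20: 0.020}, dtype="float64")
forwards = FromParToForwards(par_curve, span=120)
print(forwards.head())                       # piecewise-constant forward rates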

create_swap_struct(rfr=None, additional_swaps={})

Creates a swap structure.

Parameters:

Name Type Description Default
rfr Series

The risk-free rate term structure. Defaults to None.

None
additional_swaps dict

Additional swaps to be included. Defaults to {}.

{}

Returns:

Type Description
Series(dtype=float64)

pd.Series: The swap structure.

Source code in solvency2_data/alternative_extrapolation.py
def create_swap_struct(
    rfr: pd.Series(dtype="float64") = None, additional_swaps: dict = {}
) -> pd.Series(dtype="float64"):
    """
    Creates a swap structure.

    Args:
        rfr (pd.Series, optional): The risk-free rate term structure. Defaults to None.
        additional_swaps (dict, optional): Additional swaps to be included. Defaults to {}.

    Returns:
        pd.Series: The swap structure.
    """
    swap_struct = OrderedDict()
    denom = 0
    for duration in range(1, 21):
        rate = rfr[duration]
        denom += (1 + rate) ** (-duration)
        swap_struct[duration] = (1 - (1 + rate) ** (-duration)) / denom
    for key in additional_swaps.keys():
        swap_struct[key] = additional_swaps[key] - CRA
    return pd.Series(
        index=swap_struct.keys(), data=swap_struct.values(), dtype="float64"
    )
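
A hedged sketch: derive 20 years of par swap rates from a zero-rate curve and append an illustrative 30-year market quote (CRA is a module-level constant that is subtracted from the additional quotes).

import pandas as pd
from solvency2_data.alternative_extrapolation import create_swap_struct

zero_curve = pd.Series({t: 0.010 + 0.0004 * t for t in range(1, 21)}, dtype="float64")
swaps = create_swap_struct(zero_curve, additional_swaps={30: 0.022})
print(swaps.tail())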

forwardstruct2termstruct(forward_struct)

Converts a forward rate structure to a term structure.

Parameters:

Name Type Description Default
forward_struct Series

The forward rate structure.

required

Returns:

Type Description
Series(dtype=float64)

pd.Series: The term structure.

Source code in solvency2_data/alternative_extrapolation.py
def forwardstruct2termstruct(
    forward_struct: pd.Series(dtype="float64"),
) -> pd.Series(dtype="float64"):
    """
    Converts a forward rate structure to a term structure.

    Args:
        forward_struct (pd.Series): The forward rate structure.

    Returns:
        pd.Series: The term structure.
    """
    alt_term_struct = pd.Series(index=forward_struct.index, dtype="float64")
    alt_term_struct[1] = forward_struct[1]
    previous_forward = 1 + alt_term_struct[1]
    for i in range(2, len(forward_struct) + 1):
        alt_term_struct[i] = (previous_forward * (1 + forward_struct[i])) ** (1 / i) - 1
        previous_forward = (1 + alt_term_struct[i]) ** i
    return pd.Series(data=alt_term_struct, index=forward_struct.index, dtype="float64")
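
A hedged sketch chaining the helpers above: par (swap) rates are turned into forward rates, which are then compounded back into an alternative zero-rate term structure.

import pandas as pd
from solvency2_data.alternative_extrapolation import (
    FromParToForwards,
    forwardstruct2termstruct,
)

par_curve = pd.Series({t: 0.012 + 0.0003 * t for t in range(1, 21)}, dtype="float64")
forwards = FromParToForwards(par_curve, span=120)
alt_zero = forwardstruct2termstruct(forwards)
print(alt_zero[[1, 10, 60, 120]])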

Common utilities shared across modules

get_config()

Reads the configuration from the solvency2_data.cfg file.

Returns:

Name Type Description
dict

A dictionary containing the configuration settings.

Example

>>> get_config()
{'[Section1]': {'key1': 'value1', 'key2': 'value2'}, '[Section2]': {'key3': 'value3'}}

Source code in solvency2_data/util.py
def get_config():
    """
    Reads the configuration from the solvency2_data.cfg file.

    Returns:
        dict: A dictionary containing the configuration settings.

    Example:
        >>> get_config()
        {'[Section1]': {'key1': 'value1', 'key2': 'value2'}, '[Section2]': {'key3': 'value3'}}
    """
    # look in current directory for .cfg file
    # if not exists then take the .cfg file in the package directory
    config = configparser.ConfigParser()
    fname = "solvency2_data.cfg"
    if os.path.isfile(fname):
        config.read(fname)
    else:
        config.read(os.path.join(os.path.dirname(__file__), fname))
    return config._sections
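
A hedged sketch: the returned dictionary is keyed by configuration section; the "Directories" section used by set_config below holds the download folders.

from solvency2_data.util import get_config

cfg = get_config()
print(list(cfg.keys()))            # configured sections, e.g. "Directories"
print(cfg.get("Directories"))      # download folder settings, if present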

set_config(new_value, existing_key='data_folder')

Sets a new value for the specified key in the configuration file solvency2_data.cfg.

Parameters:

Name Type Description Default
new_value str

The new value to set for the specified key.

required
existing_key str

The key whose value needs to be updated. Default is "data_folder".

'data_folder'

Returns:

Name Type Description
int

Returns 0 upon successful completion.

Example

set_config("/new/data/folder", "data_folder") Download paths updated 0

Source code in solvency2_data/util.py
def set_config(new_value: str, existing_key: str = "data_folder"):
    """
    Sets a new value for the specified key in the configuration file solvency2_data.cfg.

    Args:
        new_value (str): The new value to set for the specified key.
        existing_key (str): The key whose value needs to be updated. Default is "data_folder".

    Returns:
        int: Returns 0 upon successful completion.

    Example:
        >>> set_config("/new/data/folder", "data_folder")
        Download paths updated
        0
    """
    config = configparser.ConfigParser()
    fname = "solvency2_data.cfg"
    fpath = os.path.join(os.path.dirname(__file__), fname)
    config.read(fpath)

    if existing_key == "data_folder":
        for k in config["Directories"]:
            # print(k)
            config["Directories"][k] = new_value

    with open(fpath, "w") as configfile:
        config.write(configfile)
    print("Download paths updated")
    return 0