
Commit 9e3d952

Merge branch 'main' of https://github.com/ladisk/pyuff
2 parents eda6a97 + 3681182

9 files changed: +204 −97 lines

.github/workflows/python-package.yml (+2 −2)

@@ -8,7 +8,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
 
     steps:
     - uses: actions/checkout@v3
@@ -29,4 +29,4 @@ jobs:
         flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
     - name: Test with pytest
       run: |
-        pytest
+        pytest

README.rst (+2 −7)

@@ -1,4 +1,4 @@
-|pytest| |documentation| |binder|
+|pytest| |documentation|
 
 pyuff
 =====
@@ -138,13 +138,8 @@ Or we can use support function ``prepare_58`` to prepare the dictionary for crea
     ordinate_spec_data_type=12,
     orddenom_spec_data_type=13)
 
-
-|binder| to test the *pyuff Showcase.ipynb* online.
-
-.. |binder| image:: http://mybinder.org/badge.svg
-   :target: http://mybinder.org:/repo/ladisk/pyuff
 .. |pytest| image:: https://github.com/ladisk/pyuff/actions/workflows/python-package.yml/badge.svg
    :target: https://github.com/ladisk/pyuff/actions
 .. |documentation| image:: https://readthedocs.org/projects/pyuff/badge/?version=latest
    :target: https://pyuff.readthedocs.io/en/latest/?badge=latest
-   :alt: Documentation Status
+   :alt: Documentation Status
Binary files changed (one of 3.4 MB); contents not shown.

pyproject.toml (+2 −2)

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pyuff"
-version = "2.4.3"
+version = "2.4.4"
 authors = [{name = "Primož Čermelj, Janko Slavič", email = "[email protected]"}]
 maintainers = [{name = "Janko Slavič et al.", email = "[email protected]"}]
 license = "MIT"
@@ -40,4 +40,4 @@ documentation = "https://pyuff.readthedocs.io/en/latest/"
 source = "https://github.com/ladisk/pyuff"
 
 [tool.hatch.build.targets.sdist]
-include = ["pyuff/*"]
+include = ["pyuff/*"]

pyuff/__init__.py (+1 −1)

@@ -1,4 +1,4 @@
-__version__ = "2.4.3"
+__version__ = "2.4.4"
 from .pyuff import *
 from .datasets import *
 

pyuff/datasets/dataset_58.py (+145 −64)

@@ -1021,8 +1021,18 @@ def _write58(fh, dset, mode='add', _filename=None, force_double=True):
         raise Exception('Error writing data-set #58')
 
 
-def _extract58(block_data):
-    """Extract function at nodal DOF - data-set 58."""
+def _extract58(block_data, header_only=False):
+    """
+    Extract function at nodal DOF - data-set 58.
+
+    :param header_only: False (default). If True, only the header data is
+        extracted (useful with large files).
+    """
+
+
+
+
+
     dset = {'type': 58, 'binary': 0}
     try:
         binary = False
@@ -1067,70 +1077,78 @@ def _extract58(block_data):
                                            'z_axis_axis_units_lab']))
         # Body
         # split_data = ''.join(split_data[13:])
-        if binary:
-            split_data = b''.join(block_data.splitlines(True)[13:])
-            if dset['byte_ordering'] == 1:
-                bo = '<'
-            else:
-                bo = '>'
-            if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 5):
-                # single precision - 4 bytes
-                values = np.asarray(struct.unpack('%c%sf' % (bo, int(len(split_data) / 4)), split_data), 'd')
-            else:
-                # double precision - 8 bytes
-                values = np.asarray(struct.unpack('%c%sd' % (bo, int(len(split_data) / 8)), split_data), 'd')
+        if header_only:
+            # If not reading data, just set placeholders
+            dset['x'] = None
+            dset['data'] = None
         else:
-            values = []
-            split_data = block_data.decode('utf-8', errors='replace').splitlines(True)[13:]
-            if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 5):
-                for line in split_data[:-1]:  # '6E13.5'
-                    values.extend([float(line[13 * i:13 * (i + 1)]) for i in range(len(line) // 13)])
-                else:
-                    line = split_data[-1]
-                    values.extend([float(line[13 * i:13 * (i + 1)]) for i in range(len(line) // 13) if line[13 * i:13 * (i + 1)] != '             '])
-            elif ((dset['ord_data_type'] == 4) or (dset['ord_data_type'] == 6)) and (dset['abscissa_spacing'] == 1):
-                for line in split_data:  # '4E20.12'
-                    values.extend([float(line[20 * i:20 * (i + 1)]) for i in range(len(line) // 20)])
-            elif (dset['ord_data_type'] == 4) and (dset['abscissa_spacing'] == 0):
-                for line in split_data:  # 2(E13.5,E20.12)
-                    values.extend(
-                        [float(line[13 * (i + j) + 20 * (i):13 * (i + 1) + 20 * (i + j)]) \
-                            for i in range(len(line) // 33) for j in [0, 1]])
-            elif (dset['ord_data_type'] == 6) and (dset['abscissa_spacing'] == 0):
-                for line in split_data:  # 1E13.5,2E20.12
-                    values.extend([float(line[0:13]), float(line[13:33]), float(line[33:53])])
-            else:
-                raise Exception('Error reading data-set #58b; not proper data case.')
-
-            values = np.asarray(values)
-            # values = np.asarray([float(str) for str in split_data],'d')
-        if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 4):
-            # Non-complex ordinate data
-            if (dset['abscissa_spacing'] == 0):
-                # Uneven abscissa
-                dset['x'] = values[:-1:2].copy()
-                dset['data'] = values[1::2].copy()
-            else:
-                # Even abscissa
-                n_val = len(values)
-                min_val = dset['abscissa_min']
-                d = dset['abscissa_inc']
-                dset['x'] = min_val + np.arange(n_val) * d
-                dset['data'] = values.copy()
-        elif (dset['ord_data_type'] == 5) or (dset['ord_data_type'] == 6):
-            # Complex ordinate data
-            if (dset['abscissa_spacing'] == 0):
-                # Uneven abscissa
-                dset['x'] = values[:-2:3].copy()
-                dset['data'] = values[1:-1:3] + 1.j * values[2::3]
+            if binary:
+                try:
+                    split_data = b''.join(block_data.splitlines(True)[13:])
+                    if dset['byte_ordering'] == 1:
+                        bo = '<'
+                    else:
+                        bo = '>'
+                    if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 5):
+                        # single precision - 4 bytes
+                        values = np.asarray(struct.unpack('%c%sf' % (bo, int(len(split_data) / 4)), split_data), 'd')
+                    else:
+                        # double precision - 8 bytes
+                        values = np.asarray(struct.unpack('%c%sd' % (bo, int(len(split_data) / 8)), split_data), 'd')
+                except:
+                    raise Exception('Potentially wrong data format (common with binary files from some commercial software). Try using pyuff.fix_58b() to fix your file. For more information, see https://github.com/ladisk/pyuff/issues/61')
             else:
-                # Even abscissa
-                n_val = len(values) / 2
-                min_val = dset['abscissa_min']
-                d = dset['abscissa_inc']
-                dset['x'] = min_val + np.arange(n_val) * d
-                dset['data'] = values[0:-1:2] + 1.j * values[1::2]
-        del values
+                values = []
+                split_data = block_data.decode('utf-8', errors='replace').splitlines(True)[13:]
+                if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 5):
+                    for line in split_data[:-1]:  # '6E13.5'
+                        values.extend([float(line[13 * i:13 * (i + 1)]) for i in range(len(line) // 13)])
+                    else:
+                        line = split_data[-1]
+                        values.extend([float(line[13 * i:13 * (i + 1)]) for i in range(len(line) // 13) if line[13 * i:13 * (i + 1)] != '             '])
+                elif ((dset['ord_data_type'] == 4) or (dset['ord_data_type'] == 6)) and (dset['abscissa_spacing'] == 1):
+                    for line in split_data:  # '4E20.12'
+                        values.extend([float(line[20 * i:20 * (i + 1)]) for i in range(len(line) // 20)])
+                elif (dset['ord_data_type'] == 4) and (dset['abscissa_spacing'] == 0):
+                    for line in split_data:  # 2(E13.5,E20.12)
+                        values.extend(
+                            [float(line[13 * (i + j) + 20 * (i):13 * (i + 1) + 20 * (i + j)]) \
+                                for i in range(len(line) // 33) for j in [0, 1]])
+                elif (dset['ord_data_type'] == 6) and (dset['abscissa_spacing'] == 0):
+                    for line in split_data:  # 1E13.5,2E20.12
+                        values.extend([float(line[0:13]), float(line[13:33]), float(line[33:53])])
+                else:
+                    raise Exception('Error reading data-set #58b; not proper data case.')
+
+                values = np.asarray(values)
+                # values = np.asarray([float(str) for str in split_data],'d')
+            if (dset['ord_data_type'] == 2) or (dset['ord_data_type'] == 4):
+                # Non-complex ordinate data
+                if (dset['abscissa_spacing'] == 0):
+                    # Uneven abscissa
+                    dset['x'] = values[:-1:2].copy()
+                    dset['data'] = values[1::2].copy()
+                else:
+                    # Even abscissa
+                    n_val = len(values)
+                    min_val = dset['abscissa_min']
+                    d = dset['abscissa_inc']
+                    dset['x'] = min_val + np.arange(n_val) * d
+                    dset['data'] = values.copy()
+            elif (dset['ord_data_type'] == 5) or (dset['ord_data_type'] == 6):
+                # Complex ordinate data
+                if (dset['abscissa_spacing'] == 0):
+                    # Uneven abscissa
+                    dset['x'] = values[:-2:3].copy()
+                    dset['data'] = values[1:-1:3] + 1.j * values[2::3]
+                else:
+                    # Even abscissa
+                    n_val = len(values) / 2
+                    min_val = dset['abscissa_min']
+                    d = dset['abscissa_inc']
+                    dset['x'] = min_val + np.arange(n_val) * d
+                    dset['data'] = values[0:-1:2] + 1.j * values[1::2]
+            del values
     except:
         raise Exception('Error reading data-set #58b')
     return dset
@@ -1484,3 +1502,66 @@ def prepare_58(
 
 
     return dataset
+
+
+def fix_58b(filename, fixed_filename=None):
+    """
+    Open the UFF file, fix a common formatting issue and save the fixed file.
+    Specifically, fix the case where the closing '    -1' of a dataset sits
+    on its own line instead of directly after the data.
+
+    :param filename: filename of the UFF file to be fixed
+    :param fixed_filename: filename to write the fixed UFF file; if None, the
+        fixed file is saved as 'filename_fixed.uff'
+    """
+
+    if not os.path.exists(filename):
+        raise Exception('Filename does not exist')
+    try:
+        # Open the file in binary read mode
+        with open(filename, 'rb') as fh:
+            data = fh.read()
+    except Exception as e:
+        raise Exception(f'Cannot access the file {filename}: {e}')
+    else:
+        try:
+            lines = data.splitlines(keepends=True)
+
+            # Fix 1: Adjust ending '    -1' line
+            if len(lines) >= 1 and lines[-1].strip() == b'-1':
+                if len(lines) >= 2:
+                    # Move '    -1' up to the end of the previous line
+                    prev_line = lines[-2].rstrip(b'\r\n')
+                    prev_line += b'    -1' + lines[-1][-1:]  # Keep the newline character
+                    lines[-2] = prev_line
+                    lines.pop()  # Remove the last line
+                else:
+                    pass
+
+            # Fix 2: Adjust 'data\n    -1\n    -1\n data' patterns
+            i = 0
+            while i < len(lines) - 3:
+                if (lines[i+1].strip() == b'-1' and lines[i+2].strip() == b'-1'):
+                    # Move '    -1' from lines[i+1] to the end of lines[i]
+                    data_line = lines[i].rstrip(b'\r\n')  # Remove newline characters
+                    data_line += b'    -1' + lines[i+1][-1:]  # Add '    -1' and newline
+                    lines[i] = data_line
+                    del lines[i+1]  # Remove the now-empty line
+                    # Do not increment i to recheck the new line at position i
+                else:
+                    i += 1  # Move to the next line
+
+            # Reassemble the data
+            data = b''.join(lines)
+
+            # Write the fixed data back to the file
+            if fixed_filename is None:
+                base, ext = os.path.splitext(filename)
+                new_filename = f"{base}_fixed{ext}"  # default filename
+            else:
+                new_filename = fixed_filename  # custom filename
+            with open(new_filename, 'wb') as fh:
+                fh.write(data)
+            print('fixed file saved as:', new_filename)
+        except Exception as e:
+            raise Exception(f'Error fixing UFF file: {filename}: {e}')

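The exception text added in _extract58 points users of misformatted binary 58b files at the new helper. A minimal usage sketch of the repair flow follows; the filename 'measurement.uff' and the try/except wrapper are illustrative assumptions, not part of the commit:

    import pyuff

    # Hypothetical file whose closing '    -1' delimiters sit on their own
    # lines (see https://github.com/ladisk/pyuff/issues/61).
    try:
        data = pyuff.UFF('measurement.uff').read_sets()
    except Exception:
        # fix_58b() rewrites the misplaced delimiters; with fixed_filename=None
        # the repaired copy is saved as 'measurement_fixed.uff'.
        pyuff.fix_58b('measurement.uff')
        data = pyuff.UFF('measurement_fixed.uff').read_sets()

Because fix_58b is re-exported through pyuff/pyuff.py (see the import change below), it is reachable as pyuff.fix_58b(), exactly as the new error message suggests.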
pyuff/pyuff.py (+22 −13)

@@ -49,7 +49,7 @@
 from .datasets.dataset_15 import _write15, _extract15, get_structure_15
 from .datasets.dataset_18 import _extract18, get_structure_18
 from .datasets.dataset_55 import _write55, _extract55, get_structure_55
-from .datasets.dataset_58 import _write58, _extract58, get_structure_58
+from .datasets.dataset_58 import _write58, _extract58, get_structure_58, fix_58b
 from .datasets.dataset_82 import _write82, _extract82, get_structure_82
 from .datasets.dataset_151 import _write151, _extract151, get_structure_151
 from .datasets.dataset_164 import _write164, _extract164, get_structure_164
@@ -230,17 +230,20 @@ def refresh(self):
         fh.close()
         return self._refreshed
 
-    def read_sets(self, setn=None):
+    def read_sets(self, setn=None, header_only=False):
         """
-        Reads sets from the list or array ``setn``. If ``setn=None``, all
-        sets are read (default). Sets are numbered starting at 0, ending at
-        n-1. The method returns a list of dset dictionaries - as
-        many dictionaries as there are sets. Unknown data-sets are returned
-        empty.
+        Reads sets.
 
+        The method returns a list of dset dictionaries - as many dictionaries as there are sets.
+        Unknown data-sets are returned empty.
         User must be sure that, since the last reading/writing/refreshing,
         the data has not changed by some other means than through the
         UFF object.
+
+        :param setn: None (default); if None, all sets are read.
+            If a number is given, only that particular set is read.
+        :param header_only: False (default); if True, only the header is read.
+            This is useful for large files.
         """
         dset = []
         if setn is None:
@@ -257,11 +260,12 @@ def read_sets(self, setn=None):
             raise Exception('Cannot read from the file: ' + self._filename)
         try:
             for ii in read_range:
-                dset.append(self._read_set(ii))
+                dset.append(self._read_set(ii, header_only=header_only))
         except Exception as msg:
-            raise Exception('Error when reading ' + str(ii) + '-th data-set: ' + msg.value)
-        except:
-            raise Exception('Error when reading data-set(s)')
+            if hasattr(msg, 'value'):
+                raise Exception('Error when reading ' + str(ii) + '-th data-set: ' + msg.value)
+            else:
+                raise Exception('Error when reading data-set(s).')
         if len(dset) == 1:
             dset = dset[0]
         return dset
@@ -306,13 +310,18 @@ def write_sets(self, dsets, mode='add', force_double=True):
         else:
             raise Exception('Unknown mode: ' + mode)
 
-    def _read_set(self, n):
+    def _read_set(self, n, header_only=False):
         """
         Reads n-th set from UFF file.
+
         n can be an integer between 0 and n_sets-1.
+
         User must be sure that, since the last reading/writing/refreshing,
         the data has not changed by some other means than through the
         UFF object. The method returns dset dictionary.
+
+        :param header_only: False (default); if True, only the header is read.
+            This is useful for large files.
         """
 
         dset = {}
@@ -351,7 +360,7 @@ def _read_set(self, n):
         elif self._set_types[int(n)] == 55:
             dset = _extract55(block_data)
         elif self._set_types[int(n)] == 58:
-            dset = _extract58(block_data)
+            dset = _extract58(block_data, header_only=header_only)
         elif self._set_types[int(n)] == 82:
             dset = _extract82(block_data)
         elif self._set_types[int(n)] == 151:

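Taken together, these changes let a caller skim the headers of a large file before deciding what to load. A minimal sketch, with a placeholder filename; note that in this commit only data-set 58 forwards header_only to its extractor, so other set types are still read in full:

    import pyuff

    uff_file = pyuff.UFF('large_measurement.uff')  # hypothetical file
    # Header pass: for data-set 58, dset['x'] and dset['data'] are None
    headers = uff_file.read_sets(header_only=True)
    # Full read of a single set of interest, e.g. the first one
    dset0 = uff_file.read_sets(setn=0)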