Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
779ce66
Fix horrible typo
Mar 29, 2021
18a8343
Add standalone validation feature
Mar 29, 2021
d0469ae
Correctly extract querystring parameter for PUT
Mar 29, 2021
c0398e3
Raise the correct exception when flag is not set
Mar 29, 2021
a9ff256
Improve comment
Jun 25, 2021
b36f701
Fix stupid logic
Mar 29, 2021
7759380
Stop earlier when flag not set
Jun 25, 2021
ab22be7
Add tests for validation flow.
Jun 30, 2021
8218ac0
Some cleanup
Jun 30, 2021
007676f
Test _validation_model field for clean
Jul 22, 2021
e0510c6
Remove incorrect argument from full_clean
Jul 22, 2021
26cf0ed
Add tests
Jul 22, 2021
6c18996
do not write to a new worksheet. Take the default worksheet instead
Jul 23, 2021
caa3009
update doc
Jul 23, 2021
4a5ef47
Add first attempt at getting mssql support working
Jul 26, 2021
027237c
add support for nullable foreign keys in CSVexport plugin
Sep 17, 2021
ed3b0db
Do 1 query per with instead of a big combined one to prevent exponent…
Jan 18, 2022
180b0b9
Do not use aggregates at all
Jan 18, 2022
c527280
Do not use join for reverse relations
Jan 19, 2022
15263ca
Do 1 query per with instead of a big combined one to prevent exponent…
Jan 18, 2022
6dd7677
Do not use aggregates at all
Jan 18, 2022
d6b11e6
Do not use join for reverse relations
Jan 19, 2022
33ea43f
Add webpage model
Feb 18, 2022
e8cbec2
Implement html field
Feb 18, 2022
ebae10f
Fix flake issues
Feb 18, 2022
6c6811e
Add gettext for html field
Mar 2, 2022
d1ff37f
finish sentence
Mar 2, 2022
ba50109
Add test for nested attributes & also test the content of the error m…
Mar 2, 2022
a3f23dd
Merge multiple errors in HTMLField
Mar 3, 2022
8d895ef
Add noreferrer noopener check for links
Mar 3, 2022
f51af6d
linting
Mar 8, 2022
1d12ae0
fix merge conflict
Mar 10, 2022
0a7de13
Fix crash on non gotten FKs
Dec 6, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
49 changes: 25 additions & 24 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,28 +4,13 @@ on: push

jobs:
check:
runs-on: ubuntu-latest

strategy:
matrix:
# Testing Python 3.7 (deb 10), Python 3.9 (deb 11), Python 3.11 (deb 12)
python-version: ["3.9", "3.11"]
django-version: ["3.2.25", "4.2.17", "5.1.4"]
database-engine: ["postgres", "mysql"]
os: [ubuntu-latest]
include:
# 3.7 cannot run on latest ubuntu
- python-version: 3.7
django-version: 3.2.25
database-engine: postgres
os: ubuntu-22.04
- python-version: 3.7
django-version: 3.2.25
database-engine: mysql
os: ubuntu-22.04
exclude:
- python-version: 3.9
django-version: 5.1.4

runs-on: ${{ matrix.os }}
python-version: [ "3.7", "3.8" ]
django-version: [ "2.1.1", "3.1.4" ]
database-engine: [ "postgres", "mysql", "mssql"]

services:
postgres:
Expand All @@ -52,19 +37,35 @@ jobs:
ports:
- 3306:3306


mssqldb:
image: mcr.microsoft.com/mssql/server:2017-latest
env:
ACCEPT_EULA: y
SA_PASSWORD: Test


steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2

- name: Setup python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}

- name: Retrieve cached venv
uses: actions/cache@v1
id: cache-venv
with:
path: ./.venv/
key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.django-version }}-venv-${{ hashFiles('ci-requirements.txt') }}

- name: Install requirements
run: |
python -m venv .venv
.venv/bin/pip install django==${{ matrix.django-version }} -r ci-requirements.txt
.venv/bin/pip install -qr ci-requirements.txt django==${{ matrix.django-version }}
if: steps.cache-venv.outputs.cache-hit != 'true'

- name: Run linting
run: .venv/bin/flake8 binder
Expand All @@ -82,7 +83,7 @@ jobs:

- name: Run tests
run: |
.venv/bin/coverage run --include="binder/*" -m unittest discover -vt . -s tests
.venv/bin/coverage run --include="binder/*" setup.py test
env:
BINDER_TEST_MYSQL: ${{ matrix.database-engine == 'mysql' && 1 || 0 }}
CY_RUNNING_INSIDE_CI: 1
Expand Down
12 changes: 12 additions & 0 deletions binder/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -235,3 +235,15 @@ def __add__(self, other):
else:
errors[model] = other.errors[model]
return BinderValidationError(errors)


class BinderSkipSave(BinderException):
    """Abort the surrounding database transaction when validation was successful.

    Raised on purpose with HTTP 200: it signals success, not failure — the
    request only asked for validation, so the work done is rolled back and
    nothing is persisted.  Validation is possible when saving (post, put,
    multi-put) or deleting models.
    """

    # 200 deliberately: from the client's point of view a clean validation
    # run is a success, even though the server aborts the transaction.
    http_code = 200
    code = 'SkipSave'

    def __init__(self):
        super().__init__()
        self.fields['message'] = 'No validation errors were encountered.'
8 changes: 6 additions & 2 deletions binder/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -617,8 +617,12 @@ class Meta:
abstract = True
ordering = ['pk']

def save(self, *args, **kwargs):
self.full_clean() # Never allow saving invalid models!
def save(self, *args, only_validate=False, **kwargs):
    """Validate this model with ``full_clean`` and then save it.

    @param only_validate: When True, marks this instance as a
        validation-only model via ``_validation_model``, so that clean
        hooks can skip checks that are only meaningful for models being
        genuinely persisted.  NOTE(review): the save itself is not
        skipped here; presumably the caller aborts the enclosing
        transaction when only validating — confirm against the view code.
    """
    # A validation model might not require all validation checks as it is not a full model
    # _validation_model can be used to skip validation checks that are meant for complete models that are actually being saved
    self._validation_model = only_validate # Set the model as a validation model when we only want to validate the model

    self.full_clean() # Never allow saving invalid models!
    return super().save(*args, **kwargs)


Expand Down
71 changes: 35 additions & 36 deletions binder/plugins/views/csvexport.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,8 @@ class ExportFileAdapter:
"""
__metaclass__ = abc.ABCMeta

def __init__(self, request: HttpRequest):
def __init__(self, request: HttpRequest, csv_settings: 'CsvExportView.CsvExportSettings'):
self.csv_settings = csv_settings
self.request = request

@abc.abstractmethod
Expand Down Expand Up @@ -69,8 +70,8 @@ class CsvFileAdapter(ExportFileAdapter):
Adapter for returning CSV files
"""

def __init__(self, request: HttpRequest):
super().__init__(request)
def __init__(self, request: HttpRequest, csv_settings: 'CsvExportView.CsvExportSettings'):
super().__init__(request, csv_settings)
self.response = HttpResponse(content_type='text/csv')
self.file_name = 'export'
self.writer = csv.writer(self.response)
Expand All @@ -79,7 +80,7 @@ def set_file_name(self, file_name: str):
self.file_name = file_name

def set_columns(self, columns: List[str]):
self.add_row(columns)
self.writer.writerow(list(map(lambda x: x[1], self.csv_settings.column_map)))

def add_row(self, values: List[str]):
self.writer.writerow(values)
Expand All @@ -91,10 +92,10 @@ def get_response(self) -> HttpResponse:

class ExcelFileAdapter(ExportFileAdapter):
"""
Adapter for returning excel files
Adapter for returning excel files
"""
def __init__(self, request: HttpRequest):
super().__init__(request)
def __init__(self, request: HttpRequest, csv_settings: 'CsvExportView.CsvExportSettings'):
super().__init__(request, csv_settings)

# Import pandas locally. This means that you can use the CSV adapter without using pandas
import openpyxl
Expand All @@ -103,7 +104,7 @@ def __init__(self, request: HttpRequest):
# self.writer = self.pandas.ExcelWriter(self.response)

self.work_book = self.openpyxl.Workbook()
self.sheet = self.work_book.active
self.sheet = self.work_book._sheets[0]

# The row number we are currently writing to
self._row_number = 0
Expand All @@ -115,7 +116,7 @@ def set_columns(self, columns: List[str]):
self.add_row(columns)

def add_row(self, values: List[str]):
for (column_id, value) in enumerate(values):
for (value, column_id) in zip(values, range(1000000)):
self.sheet.cell(column=column_id + 1, row=self._row_number + 1, value=value)
self._row_number += 1

Expand All @@ -129,9 +130,6 @@ def get_response(self) -> HttpResponse:
self.response['Content-Disposition'] = 'attachment; filename="{}.xlsx"'.format(self.file_name)
return self.response

DEFAULT_RESPONSE_TYPE_MAPPING = {
'xlsx': ExcelFileAdapter,
}

class RequestAwareAdapter(ExportFileAdapter):
"""
Expand All @@ -141,14 +139,14 @@ class RequestAwareAdapter(ExportFileAdapter):

returns a xlsx type
"""
def __init__(self, request: HttpRequest):
super().__init__(request)
def __init__(self, request: HttpRequest, csv_settings: 'CsvExportView.CsvExportSettings'):
super().__init__(request, csv_settings)

response_type_mapping = DEFAULT_RESPONSE_TYPE_MAPPING
response_type = request.GET.get('response_type', '').lower()
AdapterClass = response_type_mapping.get(response_type, CsvFileAdapter)

self.base_adapter = AdapterClass(request)
AdapterClass = CsvFileAdapter
if response_type == 'xlsx':
AdapterClass = ExcelFileAdapter
self.base_adapter = AdapterClass(request, csv_settings)

def set_file_name(self, file_name: str):
return self.base_adapter.set_file_name(file_name)
Expand All @@ -163,8 +161,6 @@ def get_response(self) -> HttpResponse:
return self.base_adapter.get_response()




class CsvExportView:
"""
This class adds another endpoint to the ModelView, namely GET model/download/. This does the same thing as getting a
Expand All @@ -182,7 +178,7 @@ class CsvExportSettings:
"""

def __init__(self, withs, column_map, file_name=None, default_file_name='download', multi_value_delimiter=' ',
extra_permission=None, extra_params={}, csv_adapter=RequestAwareAdapter, limit=10000):
extra_permission=None, csv_adapter=RequestAwareAdapter):
"""
@param withs: String[] An array of all the withs that are necessary for this csv export
@param column_map: Tuple[] An array, with all columns of the csv file in order. Each column is represented by a tuple
Expand All @@ -194,20 +190,14 @@ def __init__(self, withs, column_map, file_name=None, default_file_name='downloa
as delimiter between them. This may be if an array is returned, or if we have a one to many relation
@param extra_permission: String When set, an extra binder permission check will be done on this permission.
@param csv_adapter: Class. Either an object extending
@param response_type_mapping: Mapping between the parameter used in the custom response type
@param limit: Limit for amount of items in the csv. This is a fail-safe so that you do not bring down the server with
a big query
"""
self.withs = withs
self.column_map = column_map
self.file_name = file_name
self.default_file_name = default_file_name
self.multi_value_delimiter = multi_value_delimiter
self.extra_permission = extra_permission
self.extra_params = extra_params
self.csv_adapter = csv_adapter
self.limit = limit


def _generate_csv_file(self, request: HttpRequest, file_adapter: CsvFileAdapter):

Expand All @@ -220,10 +210,8 @@ def _generate_csv_file(self, request: HttpRequest, file_adapter: CsvFileAdapter)
mutable = request.POST._mutable
request.GET._mutable = True
request.GET['page'] = 1
request.GET['limit'] = self.csv_settings.limit if self.csv_settings.limit is not None else 'none'
request.GET['limit'] = 10000
request.GET['with'] = ",".join(self.csv_settings.withs)
for key, value in self.csv_settings.extra_params.items():
request.GET[key] = value
request.GET._mutable = mutable

parent_result = self.get(request)
Expand Down Expand Up @@ -269,6 +257,8 @@ def get_datum(data, key, prefix=''):
if '.' not in key:
if key not in data:
raise Exception("{} not found in data: {}".format(key, data))
if type(data[key]) == list:
return self.csv_settings.multi_value_delimiter.join(data[key])
return data[key]
else:
"""
Expand All @@ -284,17 +274,28 @@ def get_datum(data, key, prefix=''):
head_key, subkey = key.split('.', 1)
if head_key in data:
new_prefix = '{}.{}'.format(prefix, head_key)
if isinstance(data[head_key], dict):
if type(data[head_key]) == dict:
return get_datum(data[head_key], subkey, new_prefix)
else:
# Assume that we have a mapping now
fk_ids = data[head_key]
if not isinstance(fk_ids, list):

if fk_ids is None:
# This case happens if we have a nullable foreign key that is null. Treat this as a many
# to one relation with no values.
fk_ids = []
elif type(fk_ids) != list:
fk_ids = [fk_ids]

# if head_key not in key_mapping:
prefix_key = parent_data['with_mapping'][new_prefix[1:]]
datums = [str(get_datum(key_mapping[prefix_key][fk_id], subkey, new_prefix)) for fk_id in fk_ids]
datums = []
for fk_id in fk_ids:
try:
datums.append(str(get_datum(key_mapping[prefix_key][fk_id], subkey, new_prefix)))
except KeyError:
pass
# datums = [str(get_datum(key_mapping[prefix_key][fk_id], subkey, new_prefix)) for fk_id in fk_ids]
return self.csv_settings.multi_value_delimiter.join(
datums
)
Expand All @@ -309,8 +310,6 @@ def get_datum(data, key, prefix=''):
if len(col_definition) >= 3:
transform_function = col_definition[2]
datum = transform_function(datum, row, key_mapping)
if isinstance(datum, list):
datum = self.csv_settings.multi_value_delimiter.join(datum)
data.append(datum)
file_adapter.add_row(data)

Expand All @@ -325,7 +324,7 @@ def download(self, request):
if self.csv_settings is None:
raise Exception('No csv settings set!')

file_adapter = self.csv_settings.csv_adapter(request)
file_adapter = self.csv_settings.csv_adapter(request, self.csv_settings)

self._generate_csv_file(request, file_adapter)

Expand Down
Loading