%pip install semantic-link-labs

import sempy_labs as labs
from sempy_labs import lakehouse as lake
from sempy_labs import directlake
import sempy_labs.report as rep

dataset_name = ''      # name of the semantic model
workspace_name = None  # None defaults to the workspace of the attached notebook

# Vertipaq Analyzer: visualize the results in the notebook, export them as a
# .zip file to the attached lakehouse, or save them to delta tables.
labs.vertipaq_analyzer(dataset=dataset_name, workspace=workspace_name)
labs.vertipaq_analyzer(dataset=dataset_name, workspace=workspace_name, export='zip')
labs.vertipaq_analyzer(dataset=dataset_name, workspace=workspace_name, export='table')

# Import a previously exported Vertipaq Analyzer .zip file.
labs.import_vertipaq_analyzer(folder_path='', file_name='')

# Best Practice Analyzer: display the results, export them to a delta table,
# or show the rule output in another language.
labs.run_model_bpa(dataset=dataset_name, workspace=workspace_name)
labs.run_model_bpa(dataset=dataset_name, workspace=workspace_name, export=True)
labs.run_model_bpa(dataset=dataset_name, workspace=workspace_name, language='italian')

# Best Practice Analyzer with a custom rule set. The Tabular Object Model (TOM)
# is loaded first so rule expressions can inspect model metadata.
import sempy
sempy.fabric._client._utils._init_analysis_services()
import Microsoft.AnalysisServices.Tabular as TOM
import pandas as pd

rules = pd.DataFrame(
    [
        (
            "Performance",
            "Table",
            "Warning",
            "Rule name...",
            lambda obj, tom: tom.is_calculated_table(table_name=obj.Name),
            'Rule description...',
            '',
        ),
        (
            "Performance",
            "Column",
            "Warning",
            "Do not use floating point data types",
            lambda obj, tom: obj.DataType == TOM.DataType.Double,
            'The "Double" floating point data type should be avoided, as it can result in unpredictable roundoff errors and decreased performance in certain scenarios. Use "Int64" or "Decimal" where appropriate (but note that "Decimal" is limited to 4 digits after the decimal sign).',
            '',  # each row must supply a value for all seven columns, including URL
        ),
    ],
    columns=[
        "Category",
        "Scope",
        "Severity",
        "Rule Name",
        "Expression",
        "Description",
        "URL",
    ],
)
labs.run_model_bpa(dataset=dataset_name, workspace=workspace_name, rules=rules)

# Translate a semantic model's metadata into one or more languages.
labs.translate_semantic_model(
    dataset=dataset_name,
    workspace=workspace_name,
    languages=['italian', 'japanese', 'hindi'],
    exclude_characters='_',
)

# List the delta tables in a lakehouse; extended=True adds properties relevant
# to Direct Lake guardrails, and export=True saves the results to a delta table.
lake.get_lakehouse_tables(lakehouse=None, workspace=None, extended=True, count_rows=False)
lake.get_lakehouse_tables(lakehouse=None, workspace=None, extended=True, count_rows=False, export=True)

# Check why a Direct Lake semantic model would fall back to DirectQuery.
directlake.check_fallback_reason(dataset=dataset_name, workspace=workspace_name)

# Run OPTIMIZE against the specified lakehouse delta tables.
lake.optimize_lakehouse_tables(tables=['', ''], lakehouse=None, workspace=None)

# Warm the Direct Lake cache: put the columns that were resident in memory back
# into memory, or load the columns defined in a perspective (optionally
# including their dependencies).
directlake.warm_direct_lake_cache_isresident(dataset=dataset_name, workspace=workspace_name)
directlake.warm_direct_lake_cache_perspective(
    dataset=dataset_name,
    workspace=workspace_name,
    perspective='',
    add_dependencies=True,
)
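
# The sempy / semantic-link-labs functions above return pandas DataFrames, so
# their output can be chained with standard pandas operations. A minimal sketch,
# assuming the DataFrame returned by get_lakehouse_tables includes 'Format' and
# 'Table Name' columns (these column labels are assumptions, not confirmed by
# the snippet above; verify them against your own output before relying on this):
tables_df = lake.get_lakehouse_tables(lakehouse=None, workspace=None, extended=True, count_rows=False)
delta_table_names = tables_df.loc[tables_df['Format'] == 'delta', 'Table Name'].tolist()
lake.optimize_lakehouse_tables(tables=delta_table_names, lakehouse=None, workspace=None)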