Version 1.4.4 · mmrech/api_python@bc1806a · GitHub

Commit bc1806a

Author: Austin Zielman (committed)
Version 1.4.4
1 parent 09fd313 · commit bc1806a

File tree

239 files changed (+2088, -883 lines)
  • agent_conversation
  • agent_data_document_info
  • agent_data_execution_result
  • agent_version
  • ai_building_task
  • algorithm
  • annotation
  • annotation_config
  • annotation_document
  • annotation_entry
  • annotations_status
  • api_class
  • api_client_utils
  • api_endpoint
  • api_key
  • app_user_group
  • application_connector
  • batch_prediction
  • batch_prediction_version
  • batch_prediction_version_logs
  • categorical_range_violation
  • chat_message
  • chat_session
  • chatllm_referral_invite
  • client
  • code_source
  • concatenation_config
  • cpu_gpu_memory_specs
  • cryptography
  • custom_chat_instructions
  • custom_loss_function
  • custom_metric
  • custom_metric_version
  • custom_train_function_info
  • data_consistency_duplication
  • data_metrics
  • data_prep_logs
  • data_quality_results
  • data_upload_result
  • database_column_feature_mapping
  • database_connector
  • database_connector_column
  • database_connector_schema
  • dataset
  • dataset_column
  • dataset_version
  • dataset_version_logs
  • deployment
  • deployment_auth_token
  • deployment_conversation
  • deployment_conversation_event
  • deployment_conversation_export
  • deployment_statistics
  • document_data
  • document_retriever
  • document_retriever_config
  • document_retriever_lookup_result
  • document_retriever_version
  • drift_distribution
  • drift_distributions
  • eda
  • eda_chart_description
  • eda_collinearity
  • eda_data_consistency
  • eda_feature_association
  • eda_feature_collinearity
  • eda_forecasting_analysis
  • eda_version
  • embedding_feature_drift_distribution
  • execute_feature_group_operation
  • external_application
  • external_invite
  • extracted_fields
  • feature
  • feature_distribution
  • feature_drift_record
  • feature_drift_summary
  • feature_group
  • feature_group_document
  • feature_group_export
  • feature_group_export_config
  • feature_group_export_download_url
  • feature_group_lineage
  • feature_group_refresh_export_config
  • feature_group_row
  • feature_group_row_process
  • feature_group_row_process_logs
  • feature_group_row_process_summary
  • feature_group_template
  • feature_group_template_variable_options
  • feature_group_version
  • feature_group_version_logs
  • feature_importance
  • feature_mapping
  • feature_performance_analysis
  • feature_record
  • file_connector
  • file_connector_instructions
  • file_connector_verification
  • finetuned_pretrained_model
  • forecasting_analysis_graph_data
  • forecasting_monitor_item_analysis
  • forecasting_monitor_summary
  • function_logs
  • generated_pit_feature_config_option
  • global_context
  • graph_dashboard
  • holdout_analysis
  • holdout_analysis_version
  • hosted_model_token
  • indexing_config
  • inferred_database_column_to_feature_mappings
  • inferred_feature_mappings
  • item_statistics
  • llm_app
  • llm_code_block
  • llm_execution_preview
  • llm_execution_result
  • llm_generated_code
  • llm_input
  • llm_parameters
  • llm_response
  • memory_options
  • messaging_connector_response
  • model
  • model_artifacts_export
  • model_blueprint_export
  • model_blueprint_stage
  • model_location
  • model_metrics
  • model_monitor
  • model_monitor_org_summary
  • model_monitor_summary
  • model_monitor_summary_from_org
  • model_monitor_version
  • model_monitor_version_metric_data
  • model_training_type_for_deployment
  • model_upload
  • model_version
  • model_version_feature_group_schema
  • modification_lock_info
  • module
  • monitor_alert
  • monitor_alert_version
  • monitor_drift_and_distributions
  • natural_language_explanation
  • nested_feature
  • nested_feature_schema
  • null_violation
  • organization_external_application_settings
  • organization_group
  • organization_search_result
  • organization_secret
  • page_data
  • pipeline
  • pipeline_reference
  • pipeline_step
  • pipeline_step_version
  • pipeline_step_version_logs
  • pipeline_step_version_reference
  • pipeline_version
  • pipeline_version_logs
  • point_in_time_feature
  • point_in_time_feature_info
  • point_in_time_group
  • point_in_time_group_feature
  • prediction_client
  • prediction_dataset
  • prediction_feature_group
  • prediction_input
  • prediction_log_record
  • prediction_operator
  • prediction_operator_version
  • problem_type
  • project
  • project_config
  • project_feature_group
  • project_feature_group_schema
  • project_feature_group_schema_version
  • project_validation
  • python_function
  • python_function_validator
  • python_plot_function
  • range_violation
  • realtime_monitor
  • refresh_pipeline_run
  • refresh_policy
  • refresh_schedule
  • resolved_feature_group_template
  • return_class
  • schema
  • streaming_auth_token
  • streaming_client
  • streaming_connector
  • streaming_row_count
  • streaming_sample_code
  • test_point_predictions
  • tone_details
  • training_config_options
  • upload
  • upload_part
  • use_case
  • use_case_requirements
  • user
  • user_exception
  • web_search_response
  • web_search_result
  • webhook
  • (remaining files hidden; large commits hide some content by default)

abacusai/__init__.py
Lines changed: 1 addition & 1 deletion

@@ -4,4 +4,4 @@
 from .streaming_client import StreamingClient


-__version__ = "1.4.3"
+__version__ = "1.4.4"

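A quick sanity check against an installed client (assuming the package is installed at this commit or newer):

import abacusai
print(abacusai.__version__)  # expected: 1.4.4
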
abacusai/api_class/ai_agents.py
Lines changed: 4 additions & 4 deletions

@@ -126,7 +126,7 @@ class WorkflowNodeInputMapping(ApiClass):
     def to_dict(self):
         return {
             'name': self.name,
-            'variable_type': self.variable_type,
+            'variable_type': self.variable_type.value,
             'variable_source': self.variable_source,
             'is_required': self.is_required
         }
@@ -139,7 +139,7 @@ def from_dict(cls, mapping: dict):
             raise ValueError('input_mapping', f'Invalid workflow node input mapping "{mapping}". Must contain keys - name, variable_type')
         return cls(
             name=mapping['name'],
-            variable_type=mapping['variable_type'],
+            variable_type=enums.WorkflowNodeInputType(mapping['variable_type']),
             variable_source=mapping.get('variable_source'),
             is_required=mapping.get('is_required', True)
         )
@@ -160,7 +160,7 @@ class WorkflowNodeOutputMapping(ApiClass):
     def to_dict(self):
         return {
             'name': self.name,
-            'variable_type': self.variable_type
+            'variable_type': self.variable_type.value
         }

     @classmethod
@@ -171,7 +171,7 @@ def from_dict(cls, mapping: dict):
             raise ValueError('output_mapping', 'Invalid workflow node output mapping. Must contain keys - name.')
         return cls(
             name=mapping['name'],
-            variable_type=mapping.get('variable_type', enums.WorkflowNodeOutputType.STRING)
+            variable_type=enums.WorkflowNodeOutputType(mapping.get('variable_type', 'STRING'))
         )

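The net effect is that mappings now serialize enum members to plain strings and coerce strings back to enums on deserialization. A minimal round-trip sketch (import paths follow the modified file; the exact enum value string 'USER_INPUT' is an assumption based on the pattern used elsewhere in this diff):

from abacusai.api_class.ai_agents import WorkflowNodeInputMapping
from abacusai.api_class import enums

# from_dict() now coerces the raw string into a WorkflowNodeInputType member.
mapping = WorkflowNodeInputMapping.from_dict({
    'name': 'query',
    'variable_type': 'USER_INPUT',
})
assert mapping.variable_type == enums.WorkflowNodeInputType.USER_INPUT

# to_dict() now emits the enum's .value, so the payload is a plain string again.
assert mapping.to_dict()['variable_type'] == 'USER_INPUT'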
abacusai/api_class/enums.py
Lines changed: 3 additions & 11 deletions

@@ -422,6 +422,7 @@ class PythonFunctionOutputArgumentType(ApiEnum):
 class VectorStoreTextEncoder(ApiEnum):
     E5 = 'E5'
     OPENAI = 'OPENAI'
+    OPENAI_LARGE = 'OPENAI_LARGE'
     SENTENCE_BERT = 'SENTENCE_BERT'
     E5_SMALL = 'E5_SMALL'
     CODE_BERT = 'CODE_BERT'
@@ -432,28 +433,19 @@ class LLMName(ApiEnum):
     OPENAI_GPT4_32K = 'OPENAI_GPT4_32K'
     OPENAI_GPT4_128K = 'OPENAI_GPT4_128K'
     OPENAI_GPT4_128K_LATEST = 'OPENAI_GPT4_128K_LATEST'
-    OPENAI_GPT4_VISION = 'OPENAI_GPT4_VISION'
     OPENAI_GPT4O = 'OPENAI_GPT4O'
     OPENAI_GPT4O_MINI = 'OPENAI_GPT4O_MINI'
     OPENAI_GPT3_5 = 'OPENAI_GPT3_5'
     OPENAI_GPT3_5_TEXT = 'OPENAI_GPT3_5_TEXT'
     LLAMA3_1_405B = 'LLAMA3_1_405B'
+    LLAMA3_1_70B = 'LLAMA3_1_70B'
+    LLAMA3_1_8B = 'LLAMA3_1_8B'
     LLAMA3_LARGE_CHAT = 'LLAMA3_LARGE_CHAT'
-    GROQ_LLAMA3_LARGE_CHAT = 'GROQ_LLAMA3_LARGE_CHAT'
-    CLAUDE_V2_1 = 'CLAUDE_V2_1'
     CLAUDE_V3_OPUS = 'CLAUDE_V3_OPUS'
     CLAUDE_V3_SONNET = 'CLAUDE_V3_SONNET'
     CLAUDE_V3_HAIKU = 'CLAUDE_V3_HAIKU'
     CLAUDE_V3_5_SONNET = 'CLAUDE_V3_5_SONNET'
-    ABACUS_GIRAFFE = 'ABACUS_GIRAFFE'
-    ABACUS_GIRAFFE_LARGE = 'ABACUS_GIRAFFE_LARGE'
-    LLAMA2_CHAT = 'LLAMA2_CHAT'
-    PALM = 'PALM'
-    PALM_TEXT = 'PALM_TEXT'
-    GEMINI_PRO = 'GEMINI_PRO'
     GEMINI_1_5_PRO = 'GEMINI_1_5_PRO'
-    MIXTRAL_CHAT = 'MIXTRAL_CHAT'
-    MISTRAL_MEDIUM = 'MISTRAL_MEDIUM'
     ABACUS_SMAUG3 = 'ABACUS_SMAUG3'
     GEMINI_1_5_FLASH = 'GEMINI_1_5_FLASH'

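The new members are addressable like any other ApiEnum value; a small illustrative sketch (the string-to-enum lookup mirrors the coercion pattern used in ai_agents.py above, and is an assumption, not part of this commit):

from abacusai.api_class import enums

encoder = enums.VectorStoreTextEncoder.OPENAI_LARGE  # added in this commit
llm = enums.LLMName('LLAMA3_1_70B')                  # also added in this commit
print(encoder.value, llm.value)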
abacusai/api_class/model.py
Lines changed: 6 additions & 2 deletions

@@ -460,15 +460,16 @@ class ChatLLMTrainingConfig(TrainingConfig):
         filter_columns (list): Allow users to filter the document retrievers on these metadata columns.
         include_general_knowledge (bool): Allow the LLM to rely not just on RAG search results, but to fall back on general knowledge. Disabled by default.
         enable_web_search (bool): Allow the LLM to use Web Search Engines to retrieve information for better results.
-        behavior_instructions (str): Customize the overall role instructions for the LLM.
-        response_instructions (str): Customized instructions for how the LLM should respond.
+        behavior_instructions (str): Customize the overall behaviour of the model. This controls things like when to execute code (if enabled), write SQL queries, search the web (if enabled), etc.
+        response_instructions (str): Customized instructions for how the model should respond.
         enable_llm_rewrite (bool): If enabled, an LLM will rewrite the RAG queries sent to the document retriever. Disabled by default.
         column_filtering_instructions (str): Instructions for an LLM call to automatically generate filter expressions on document metadata to retrieve relevant documents for the conversation.
         keyword_requirement_instructions (str): Instructions for an LLM call to automatically generate keyword requirements to retrieve relevant documents for the conversation.
         query_rewrite_instructions (str): Special instructions for the LLM which rewrites the RAG query.
         max_search_results (int): Maximum number of search results in the retrieval augmentation step. If we know that the questions are likely to have snippets which are easily matched in the documents, then a lower number will help with accuracy.
         data_feature_group_ids (List[str]): List of feature group IDs to use to possibly query for the ChatLLM. The created ChatLLM is commonly referred to as DataLLM.
         data_prompt_context (str): Prompt context for the data feature group IDs.
+        data_prompt_table_context (Dict[str, str]): Dict of table name and table context pairs to provide table-wise context for each structured data table.
         hide_sql_and_code (bool): When running data queries, this will hide the generated SQL and code in the response.
         disable_data_summarization (bool): After executing a query, summarize the response and reply back with only the table and query run.
         data_columns_to_ignore (List[str]): Columns to ignore while encoding information about structured data tables in context for the LLM. A list of strings of format "<table_name>.<column_name>"
@@ -478,6 +479,7 @@ class ChatLLMTrainingConfig(TrainingConfig):
         database_connector_tables (List[str]): List of tables to use from the database connector for the ChatLLM.
         enable_code_execution (bool): Enable python code execution in the ChatLLM. This equips the LLM with a python kernel in which all its code is executed.
         enable_response_caching (bool): Enable caching of LLM responses to speed up response times and improve reproducibility.
+        unknown_answer_phrase (str): Fallback response when the LLM can't find an answer.
     """
     document_retrievers: List[str] = dataclasses.field(default=None)
     num_completion_tokens: int = dataclasses.field(default=None)
@@ -495,6 +497,7 @@ class ChatLLMTrainingConfig(TrainingConfig):
     max_search_results: int = dataclasses.field(default=None)
     data_feature_group_ids: List[str] = dataclasses.field(default=None)
     data_prompt_context: str = dataclasses.field(default=None)
+    data_prompt_table_context: Dict[str, str] = dataclasses.field(default=None)
     hide_sql_and_code: bool = dataclasses.field(default=None)
     disable_data_summarization: bool = dataclasses.field(default=None)
     data_columns_to_ignore: List[str] = dataclasses.field(default=None)
@@ -506,6 +509,7 @@ class ChatLLMTrainingConfig(TrainingConfig):
     metadata_columns: list = dataclasses.field(default=None, metadata={'deprecated': True})
     lookup_rewrite_instructions: str = dataclasses.field(default=None, metadata={'deprecated': True})
     enable_response_caching: bool = dataclasses.field(default=None)
+    unknown_answer_phrase: str = dataclasses.field(default=None)

     def __post_init__(self):
         self.problem_type = enums.ProblemType.CHAT_LLM

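A hedged sketch of how the two new config fields might be populated (field names and defaults come from this diff; the import path follows the file being modified, while the retriever and feature group names are invented for illustration):

from abacusai.api_class.model import ChatLLMTrainingConfig

# Both new fields default to None, so they remain optional.
config = ChatLLMTrainingConfig(
    document_retrievers=['contracts_retriever'],           # hypothetical retriever name
    data_feature_group_ids=['fg_orders', 'fg_customers'],  # hypothetical feature groups
    data_prompt_table_context={                            # new in 1.4.4: per-table context
        'orders': 'One row per order; amounts are in USD.',
        'customers': 'One row per customer account.',
    },
    unknown_answer_phrase='I could not find that in the connected data.',  # new in 1.4.4
)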
abacusai/api_client_utils.py
Lines changed: 188 additions & 0 deletions

@@ -180,6 +180,194 @@ def load_as_pandas_from_avro_files(files: List[str], download_method: Callable,
     return data_df


+def validate_workflow_node_inputs(nodes_info, agent_workflow_node_id, keyword_arguments: dict, sample_user_inputs: dict, filtered_workflow_vars: dict):
+    from .api_class import WorkflowNodeInputType
+    input_mappings = nodes_info[agent_workflow_node_id].get('input_mappings', {})
+    input_schema = nodes_info[agent_workflow_node_id].get('input_schema', {})
+    if input_schema.get('runtime_schema', False):
+        keyword_arguments = {input_schema.get('schema_prop'): keyword_arguments}
+    for input_mapping in input_mappings:
+        input_name = input_mapping['name']
+        variable_type = input_mapping['variable_type']
+        is_required = input_mapping.get('is_required', True)
+        variable_source = input_mapping['variable_source']
+        if variable_type == 'WORKFLOW_VARIABLE':
+            if variable_source not in filtered_workflow_vars:
+                raise ValueError(f'The stage corresponding to "{agent_workflow_node_id}" requires variables from {variable_source} stage which are not there.')
+            if input_name not in filtered_workflow_vars[variable_source] and is_required:
+                raise ValueError(f'Missing required input "{input_name}" in workflow vars for workflow node "{agent_workflow_node_id}".')
+            else:
+                keyword_arguments[input_name] = filtered_workflow_vars[variable_source][input_name]
+        elif variable_type == WorkflowNodeInputType.USER_INPUT:
+            if sample_user_inputs and input_name in sample_user_inputs:
+                keyword_arguments[input_name] = sample_user_inputs[input_name]
+            elif variable_source in filtered_workflow_vars and input_name in filtered_workflow_vars[variable_source]:
+                keyword_arguments[input_name] = filtered_workflow_vars[variable_source][input_name]
+            else:
+                if is_required:
+                    raise ValueError(f'User input for "{input_name}" is required for the "{agent_workflow_node_id}" node.')
+                else:
+                    keyword_arguments[input_name] = None
+
+
+def run(nodes: List[dict], primary_start_node: str, graph_info: dict, sample_user_inputs: dict = None, agent_workflow_node_id: str = None, workflow_vars: dict = {}, topological_dfs_stack: List = []):
+    from .api_class import WorkflowNodeInputType
+    source_code = graph_info['source_code']
+    exec(source_code, globals())
+
+    nodes_info: dict = {node['name']: node for node in nodes}
+    traversal_orders = graph_info['traversal_orders']
+    nodes_ancestors = graph_info['nodes_ancestors']
+    nodes_inedges = graph_info['nodes_inedges']
+    primary_start_node = primary_start_node or graph_info['default_root_node']
+
+    primary_traversal_order = traversal_orders[primary_start_node]
+    run_info = {}
+    workflow_vars = workflow_vars.copy()
+    next_agent_workflow_node_id = None
+
+    if agent_workflow_node_id:
+        next_agent_workflow_node_id = agent_workflow_node_id
+        if next_agent_workflow_node_id not in traversal_orders.keys():
+            if next_agent_workflow_node_id not in nodes_info:
+                raise ValueError(f'The provided workflow node id "{next_agent_workflow_node_id}" is not part of the workflow. Please provide a valid node id.')
+            else:
+                topological_dfs_stack.append(next_agent_workflow_node_id)
+        else:
+            topological_dfs_stack = [next_agent_workflow_node_id]
+    else:
+        next_agent_workflow_node_id = primary_start_node
+        topological_dfs_stack = [primary_start_node]
+
+    flow_traversal_order = primary_traversal_order
+    for root, traversal_order in traversal_orders.items():
+        if next_agent_workflow_node_id in traversal_order:
+            flow_traversal_order = traversal_order
+            break
+
+    run_history = []
+    workflow_node_outputs = {}
+    while (True):
+        agent_workflow_node_id = next_agent_workflow_node_id
+        node_ancestors = nodes_ancestors[agent_workflow_node_id]
+
+        # To ensure the node takes inputs only from its ancestors.
+        # workflow_vars must always contain an entry for ancestor, the error is somewhere else if this ever errors out.
+        filtered_workflow_vars = {}
+        for ancestor in node_ancestors:
+            if ancestor not in workflow_vars:
+                raise ValueError(f'Ancestor "{ancestor}" of node "{agent_workflow_node_id}" is not executed yet. Please make sure the ancestor nodes are executed before the current node.')
+            else:
+                filtered_workflow_vars[ancestor] = workflow_vars[ancestor]
+
+        arguments = []
+        keyword_arguments = {}
+        validate_workflow_node_inputs(nodes_info, agent_workflow_node_id, keyword_arguments, sample_user_inputs, filtered_workflow_vars)
+
+        try:
+            func = eval(nodes_info[agent_workflow_node_id]['function_name'])
+            node_response = func(*arguments, **keyword_arguments)
+            workflow_node_outputs[agent_workflow_node_id] = node_response.to_dict()
+            node_workflow_vars = process_node_response(node_response)
+        except Exception as error:
+            raise ValueError(f'Error in running workflow node {agent_workflow_node_id}: {error}')
+
+        workflow_vars[agent_workflow_node_id] = node_workflow_vars
+        next_agent_workflow_node_id = None
+        needs_user_input = False
+
+        potential_next_index = flow_traversal_order.index(topological_dfs_stack[-1]) + 1
+        potential_next_agent_workflow_node_id = None
+        while (potential_next_index < len(flow_traversal_order)):
+            potential_next_agent_workflow_node_id = flow_traversal_order[potential_next_index]
+            incoming_edges = nodes_inedges[potential_next_agent_workflow_node_id]
+            valid_next_node = True
+            for source, _, details in incoming_edges:
+                if source not in topological_dfs_stack:
+                    valid_next_node = False
+                    potential_next_index += 1
+                    break
+                else:
+                    edge_evaluate_result = evaluate_edge_condition(source, potential_next_agent_workflow_node_id, details, workflow_vars)
+                    if not edge_evaluate_result:
+                        valid_next_node = False
+                        potential_next_index += 1
+                        break
+            if valid_next_node:
+                next_agent_workflow_node_id = potential_next_agent_workflow_node_id
+                break
+
+        if next_agent_workflow_node_id:
+            next_node_input_mappings = nodes_info[next_agent_workflow_node_id].get('input_mappings', [])
+            needs_user_input = any([input_mapping['variable_type'] == WorkflowNodeInputType.USER_INPUT for input_mapping in next_node_input_mappings])
+
+        if needs_user_input:
+            run_history.append(f'Workflow node {agent_workflow_node_id} completed with next node {next_agent_workflow_node_id} and needs user_inputs')
+        else:
+            run_history.append(f'Workflow node {agent_workflow_node_id} completed with next node {next_agent_workflow_node_id}')
+            topological_dfs_stack.append(next_agent_workflow_node_id)
+
+        if next_agent_workflow_node_id is None or needs_user_input:
+            break
+
+    run_info['workflow_node_outputs'] = workflow_node_outputs
+    run_info['run_history'] = run_history
+
+    workflow_info = {}
+    workflow_info['workflow_vars'] = workflow_vars
+    workflow_info['topological_dfs_stack'] = topological_dfs_stack
+    workflow_info['run_info'] = run_info
+
+    return workflow_info
+
+
+def evaluate_edge_condition(source, target, details, workflow_vars):
+    try:
+        condition = details.get('EXECUTION_CONDITION')
+        if condition:
+            result = execute_python_source(condition, workflow_vars.get(source, {}))
+            return result
+        return True
+    except Exception as e:
+        raise ValueError(f"Error evaluating edge '{source}'-->'{target}': {str(e)}")
+
+
+def execute_python_source(python_expression, variables):
+    try:
+        # Evaluate the expression using the variables dictionary
+        result = eval(python_expression, {}, variables)
+        return result
+    except Exception as e:
+        # Handle any exceptions that may occur during evaluation
+        raise ValueError(f'Error evaluating expression: {e}')
+
+
+def process_node_response(node_response):
+    output_vars = {}
+    for variable in node_response.section_data_list:
+        for key, value in variable.items():
+            output_vars[key] = value
+    return output_vars
+
+
 class StreamType(Enum):
     MESSAGE = 'message'
     SECTION_OUTPUT = 'section_output'

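The edge-condition helpers are the easiest new piece to exercise in isolation: an edge fires only when its EXECUTION_CONDITION expression evaluates truthily against the source node's workflow variables. A minimal sketch (the function signatures come from the diff above; node names, variables, and the condition expression are invented for illustration):

from abacusai.api_client_utils import evaluate_edge_condition

# Workflow variables produced by a hypothetical upstream node.
workflow_vars = {'classify_intent': {'intent': 'refund', 'confidence': 0.92}}

# The condition is eval()'d by execute_python_source with the source node's vars in scope.
details = {'EXECUTION_CONDITION': "intent == 'refund' and confidence > 0.8"}
print(evaluate_edge_condition('classify_intent', 'process_refund', details, workflow_vars))  # True

# Edges without a condition always fire.
print(evaluate_edge_condition('classify_intent', 'fallback_node', {}, workflow_vars))  # True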
    0 commit comments

    Comments
     (0)
    0