
[Python] I keep getting a TensorFlow ValueError and I don't know what is causing it

Discussion in 'Python' started by Stack, September 12, 2024.


    So I installed everything to run a Jupyter notebook in Visual Studio Code and have TensorFlow and Python fully updated. However, I keep running into an error when I run one of the cells. Specifically:

    File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\LocalCache\local-packages\Python311\site-packages\keras\src\utils\traceback_utils.py:122, in filter_traceback.<locals>.error_handler(*args, **kwargs)
        119             filtered_tb = _process_traceback_frames(e.__traceback__)
        120             # To get the full stack trace, call:
        121             # `keras.config.disable_traceback_filtering()`
    --> 122             raise e.with_traceback(filtered_tb) from None
        123         finally:
        124             del filtered_tb


    File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\LocalCache\local-packages\Python311\site-packages\keras\src\trainers\data_adapters\__init__.py:113, in get_data_adapter(x, y, sample_weight, batch_size, steps_per_epoch, shuffle, class_weight)
        105         return GeneratorDataAdapter(x)
        106         # TODO: should we warn or not?
        107         # warnings.warn(
        108         #     "`shuffle=True` was passed, but will be ignored since the "
        (...)
        111         # )
        112     else:
    --> 113         raise ValueError(f"Unrecognized data type: x={x} (of type {type(x)})")


    At the end it says: ValueError: Unrecognized data type: x=[10.0] (of type <class 'list'>)

    Is there any way to fix this?

    This is my current code:

    import tensorflow as tf
    import numpy as np
    from tensorflow import keras
    model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
    model.compile(optimizer='sgd', loss='mean_squared_error')
    xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
    ys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)
    model.fit(xs, ys, epochs=500)
    print(model.predict([10.0])) #This is where the error is
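
    A likely workaround, going by the adapter code quoted further down, is to pass the value as a NumPy array of shape (1, 1) rather than a plain Python list, since the data adapters accept arrays but fall through to the ValueError for a bare list. A minimal sketch, assuming the same toy model as above:

    import numpy as np
    import tensorflow as tf
    from tensorflow import keras

    # Rebuild and train the same toy model as in the question.
    model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
    model.compile(optimizer='sgd', loss='mean_squared_error')
    xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
    ys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)
    model.fit(xs, ys, epochs=500, verbose=0)  # verbose=0 just to keep the output short

    # Pass a NumPy array (one sample, one feature) instead of a plain list,
    # which this Keras version's data adapters do not recognize.
    print(model.predict(np.array([[10.0]])))

    A tf.constant([[10.0]]) or a tf.data.Dataset should be accepted for the same reason.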


    This is where the ValueError is raised:

    if array_data_adapter.can_convert_arrays((x, y, sample_weight)):
        return ArrayDataAdapter(
            x,
            y,
            sample_weight=sample_weight,
            class_weight=class_weight,
            shuffle=shuffle,
            batch_size=batch_size,
            steps=steps_per_epoch,
        )
    elif is_tf_dataset(x):
        # Unsupported args: y, sample_weight, shuffle
        if y is not None:
            raise_unsupported_arg("y", "the targets", "tf.data.Dataset")
        if sample_weight is not None:
            raise_unsupported_arg(
                "sample_weights", "the sample weights", "tf.data.Dataset"
            )
        return TFDatasetAdapter(
            x, class_weight=class_weight, distribution=distribution
        )
        # TODO: should we warn or not?
        # warnings.warn(
        #     "`shuffle=True` was passed, but will be ignored since the "
        #     "data `x` was provided as a tf.data.Dataset. The Dataset is "
        #     "expected to already be shuffled "
        #     "(via `.shuffle(tf.data.AUTOTUNE)`)"
        # )
    elif isinstance(x, py_dataset_adapter.PyDataset):
        if y is not None:
            raise_unsupported_arg("y", "the targets", "PyDataset")
        if sample_weight is not None:
            raise_unsupported_arg(
                "sample_weights", "the sample weights", "PyDataset"
            )
        return PyDatasetAdapter(x, class_weight=class_weight, shuffle=shuffle)
    elif is_torch_dataloader(x):
        if y is not None:
            raise_unsupported_arg("y", "the targets", "torch DataLoader")
        if sample_weight is not None:
            raise_unsupported_arg(
                "sample_weights", "the sample weights", "torch DataLoader"
            )
        if class_weight is not None:
            raise ValueError(
                "Argument `class_weight` is not supported for torch "
                f"DataLoader inputs. Received: class_weight={class_weight}"
            )
        return TorchDataLoaderAdapter(x)
        # TODO: should we warn or not?
        # warnings.warn(
        #     "`shuffle=True` was passed, but will be ignored since the "
        #     "data `x` was provided as a torch DataLoader. The DataLoader "
        #     "is expected to already be shuffled."
        # )
    elif isinstance(x, types.GeneratorType):
        if y is not None:
            raise_unsupported_arg("y", "the targets", "PyDataset")
        if sample_weight is not None:
            raise_unsupported_arg(
                "sample_weights", "the sample weights", "PyDataset"
            )
        if class_weight is not None:
            raise ValueError(
                "Argument `class_weight` is not supported for Python "
                f"generator inputs. Received: class_weight={class_weight}"
            )
        return GeneratorDataAdapter(x)
        # TODO: should we warn or not?
        # warnings.warn(
        #     "`shuffle=True` was passed, but will be ignored since the "
        #     "data `x` was provided as a generator. The generator "
        #     "is expected to yield already-shuffled data."
        # )
    else:
        raise ValueError(f"Unrecognized data type: x={x} (of type {type(x)})")


    This is another segment of code that is highlighted:

    def filter_traceback(fn):
        """Filter out Keras-internal traceback frames in exceptions raised by fn."""

        @wraps(fn)
        def error_handler(*args, **kwargs):
            if not is_traceback_filtering_enabled():
                return fn(*args, **kwargs)

            filtered_tb = None
            try:
                return fn(*args, **kwargs)
            except Exception as e:
                filtered_tb = _process_traceback_frames(e.__traceback__)
                # To get the full stack trace, call:
                # `keras.config.disable_traceback_filtering()`
                raise e.with_traceback(filtered_tb) from None
            finally:
                del filtered_tb
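
    This wrapper is what trims the Keras-internal frames out of the traceback, and its comment points to the switch for seeing everything. A minimal sketch of using it before re-running the failing cell, in case the full trace helps with debugging:

    import keras

    # Disable Keras's traceback filtering so the next exception shows the
    # full, unfiltered stack trace (as the comment in error_handler suggests).
    keras.config.disable_traceback_filtering()

    # ... re-run model.predict(...) here to see every internal frame ...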

