From d5f63b3e869bdbf9196782e3eb8c36ed4b3d9517 Mon Sep 17 00:00:00 2001 From: Steffen Cruz Date: Fri, 8 Dec 2023 14:02:56 -0600 Subject: [PATCH] Try lots of stuff. not working --- neurons/miner.py | 54 +++-- neurons/validator.py | 6 +- requirements.txt | 8 +- template/__init__.py | 33 --- template/base/__init__.py | 0 template/base/miner.py | 215 -------------------- template/base/neuron.py | 168 ---------------- template/base/validator.py | 332 ------------------------------- template/protocol.py | 76 ------- template/utils/__init__.py | 3 - template/utils/config.py | 177 ---------------- template/utils/misc.py | 112 ----------- template/utils/uids.py | 63 ------ template/validator/__init__.py | 2 - template/validator/forward.py | 61 ------ template/validator/reward.py | 54 ----- tests/test_template_validator.py | 10 +- 17 files changed, 52 insertions(+), 1322 deletions(-) delete mode 100644 template/__init__.py delete mode 100644 template/base/__init__.py delete mode 100644 template/base/miner.py delete mode 100644 template/base/neuron.py delete mode 100644 template/base/validator.py delete mode 100644 template/protocol.py delete mode 100644 template/utils/__init__.py delete mode 100644 template/utils/config.py delete mode 100644 template/utils/misc.py delete mode 100644 template/utils/uids.py delete mode 100644 template/validator/__init__.py delete mode 100644 template/validator/forward.py delete mode 100644 template/validator/reward.py diff --git a/neurons/miner.py b/neurons/miner.py index 274ff9d..4cb8bcf 100644 --- a/neurons/miner.py +++ b/neurons/miner.py @@ -20,12 +20,13 @@ import time import typing import bittensor as bt +import pytesseract # Bittensor Miner Template: -import template +import ocr_subnet # import base miner class which takes care of most of the boilerplate -from template.base.miner import BaseMinerNeuron +from ocr_subnet.base.miner import BaseMinerNeuron class Miner(BaseMinerNeuron): @@ -43,27 +44,48 @@ def __init__(self, config=None): # 
TODO(developer): Anything specific to your use case you can do here async def forward( - self, synapse: template.protocol.Dummy - ) -> template.protocol.Dummy: + self, synapse: ocr_subnet.protocol.OCRSynapse + ) -> ocr_subnet.protocol.OCRSynapse: """ - Processes the incoming 'Dummy' synapse by performing a predefined operation on the input data. - This method should be replaced with actual logic relevant to the miner's purpose. + Processes the incoming OCR synapse and attaches the response to the synapse. Args: - synapse (template.protocol.Dummy): The synapse object containing the 'dummy_input' data. + synapse (ocr_subnet.protocol.OCRSynapse): The synapse object containing the image data. Returns: - template.protocol.Dummy: The synapse object with the 'dummy_output' field set to twice the 'dummy_input' value. + ocr_subnet.protocol.OCRSynapse: The synapse object with the 'response' field set to the extracted data. - The 'forward' function is a placeholder and should be overridden with logic that is appropriate for - the miner's intended operation. This method demonstrates a basic transformation of input data. """ - # TODO(developer): Replace with actual implementation logic. - synapse.dummy_output = synapse.dummy_input * 2 + + image = synapse.image + # Use pytesseract to get the data + data = pytesseract.image_to_data(image, output_type=pytesseract.Output.DICT) + + # Initialize the response list + response = [] + + # Loop over each item in the 'text' part of the data + for i in range(len(data['text'])): + if data['text'][i].strip() != '': # This filters out empty text results + x1, y1, width, height = data['left'][i], data['top'][i], data['width'][i], data['height'][i] + x2, y2 = x1 + width, y1 + height + + # Here we don't have font information, so we'll omit that. + # Pytesseract does not extract font family or size information. 
+ entry = { + 'index': i, + 'position': [x1, y1, x2, y2], + 'text': data['text'][i] + } + response.append(entry) + + # Attach response to synapse and return it. + synapse.response = response + return synapse async def blacklist( - self, synapse: template.protocol.Dummy + self, synapse: ocr_subnet.protocol.OCRSynapse ) -> typing.Tuple[bool, str]: """ Determines whether an incoming request should be blacklisted and thus ignored. Your implementation should @@ -74,7 +96,7 @@ async def blacklist( requests before they are deserialized to avoid wasting resources on requests that will be ignored. Args: - synapse (template.protocol.Dummy): A synapse object constructed from the headers of the incoming request. + synapse (ocr_subnet.protocol.OCRSynapse): A synapse object constructed from the headers of the incoming request. Returns: Tuple[bool, str]: A tuple containing a boolean indicating whether the synapse's hotkey is blacklisted, @@ -107,7 +129,7 @@ async def blacklist( ) return False, "Hotkey recognized!" - async def priority(self, synapse: template.protocol.Dummy) -> float: + async def priority(self, synapse: ocr_subnet.protocol.OCRSynapse) -> float: """ The priority function determines the order in which requests are handled. More valuable or higher-priority requests are processed before others. You should design your own priority mechanism with care. @@ -115,7 +137,7 @@ async def priority(self, synapse: template.protocol.Dummy) -> float: This implementation assigns priority to incoming requests based on the calling entity's stake in the metagraph. Args: - synapse (template.protocol.Dummy): The synapse object that contains metadata about the incoming request. + synapse (ocr_subnet.protocol.OCRSynapse): The synapse object that contains metadata about the incoming request. Returns: float: A priority score derived from the stake of the calling entity. 
diff --git a/neurons/validator.py b/neurons/validator.py index 7b50202..29560dc 100644 --- a/neurons/validator.py +++ b/neurons/validator.py @@ -23,12 +23,10 @@ # Bittensor import bittensor as bt -# Bittensor Validator Template: -import template -from template.validator import forward +from ocr_subnet.validator import forward # import base validator class which takes care of most of the boilerplate -from template.base.validator import BaseValidatorNeuron +from ocr_subnet.base.validator import BaseValidatorNeuron class Validator(BaseValidatorNeuron): diff --git a/requirements.txt b/requirements.txt index c1b866e..4fd9c55 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,8 @@ bittensor -torch \ No newline at end of file +torch +pytesseract +pandas +faker +reportlab +pdf2image +editdistance \ No newline at end of file diff --git a/template/__init__.py b/template/__init__.py deleted file mode 100644 index 4854a3f..0000000 --- a/template/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Yuma Rao -# TODO(developer): Set your name -# Copyright © 2023 - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -# TODO(developer): Change this value when updating your code base. -# Define the version of the template module. -__version__ = "0.0.0" -version_split = __version__.split(".") -__spec_version__ = ( - (1000 * int(version_split[0])) - + (10 * int(version_split[1])) - + (1 * int(version_split[2])) -) - -# Import all submodules. -from . import protocol -from . import base -from . import validator diff --git a/template/base/__init__.py b/template/base/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/template/base/miner.py b/template/base/miner.py deleted file mode 100644 index d4a1738..0000000 --- a/template/base/miner.py +++ /dev/null @@ -1,215 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import time -import torch -import asyncio -import threading -import traceback - -import bittensor as bt - -from template.base.neuron import BaseNeuron - - -class BaseMinerNeuron(BaseNeuron): - """ - Base class for Bittensor miners. - """ - - def __init__(self, config=None): - super().__init__(config=config) - - # Warn if allowing incoming requests from anyone. - if not self.config.blacklist.force_validator_permit: - bt.logging.warning( - "You are allowing non-validators to send requests to your miner. This is a security risk." - ) - if self.config.blacklist.allow_non_registered: - bt.logging.warning( - "You are allowing non-registered entities to send requests to your miner. This is a security risk." - ) - - # The axon handles request processing, allowing validators to send this miner requests. - self.axon = bt.axon(wallet=self.wallet, port=self.config.axon.port) - - # Attach determiners which functions are called when servicing a request. - bt.logging.info(f"Attaching forward function to miner axon.") - self.axon.attach( - forward_fn=self.forward, - blacklist_fn=self.blacklist, - priority_fn=self.priority, - ) - bt.logging.info(f"Axon created: {self.axon}") - - # Instantiate runners - self.should_exit: bool = False - self.is_running: bool = False - self.thread: threading.Thread = None - self.lock = asyncio.Lock() - - def run(self): - """ - Initiates and manages the main loop for the miner on the Bittensor network. The main loop handles graceful shutdown on keyboard interrupts and logs unforeseen errors. - - This function performs the following primary tasks: - 1. Check for registration on the Bittensor network. - 2. Starts the miner's axon, making it active on the network. - 3. 
Periodically resynchronizes with the chain; updating the metagraph with the latest network state and setting weights. - - The miner continues its operations until `should_exit` is set to True or an external interruption occurs. - During each epoch of its operation, the miner waits for new blocks on the Bittensor network, updates its - knowledge of the network (metagraph), and sets its weights. This process ensures the miner remains active - and up-to-date with the network's latest state. - - Note: - - The function leverages the global configurations set during the initialization of the miner. - - The miner's axon serves as its interface to the Bittensor network, handling incoming and outgoing requests. - - Raises: - KeyboardInterrupt: If the miner is stopped by a manual interruption. - Exception: For unforeseen errors during the miner's operation, which are logged for diagnosis. - """ - - # Check that miner is registered on the network. - self.sync() - - # Serve passes the axon information to the network + netuid we are hosting on. - # This will auto-update if the axon port of external ip have changed. - bt.logging.info( - f"Serving miner axon {self.axon} on network: {self.config.subtensor.chain_endpoint} with netuid: {self.config.netuid}" - ) - self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) - - # Start starts the miner's axon, making it active on the network. - self.axon.start() - - bt.logging.info(f"Miner starting at block: {self.block}") - - # This loop maintains the miner's operations until intentionally stopped. - try: - while not self.should_exit: - while ( - self.block - self.metagraph.last_update[self.uid] - < self.config.neuron.epoch_length - ): - # Wait before checking again. - time.sleep(1) - - # Check if we should exit. - if self.should_exit: - break - - # Sync metagraph and potentially set weights. - self.sync() - self.step += 1 - - # If someone intentionally stops the miner, it'll safely terminate operations. 
- except KeyboardInterrupt: - self.axon.stop() - bt.logging.success("Miner killed by keyboard interrupt.") - exit() - - # In case of unforeseen errors, the miner will log the error and continue operations. - except Exception as e: - bt.logging.error(traceback.format_exc()) - - def run_in_background_thread(self): - """ - Starts the miner's operations in a separate background thread. - This is useful for non-blocking operations. - """ - if not self.is_running: - bt.logging.debug("Starting miner in background thread.") - self.should_exit = False - self.thread = threading.Thread(target=self.run, daemon=True) - self.thread.start() - self.is_running = True - bt.logging.debug("Started") - - def stop_run_thread(self): - """ - Stops the miner's operations that are running in the background thread. - """ - if self.is_running: - bt.logging.debug("Stopping miner in background thread.") - self.should_exit = True - self.thread.join(5) - self.is_running = False - bt.logging.debug("Stopped") - - def __enter__(self): - """ - Starts the miner's operations in a background thread upon entering the context. - This method facilitates the use of the miner in a 'with' statement. - """ - self.run_in_background_thread() - return self - - def __exit__(self, exc_type, exc_value, traceback): - """ - Stops the miner's background operations upon exiting the context. - This method facilitates the use of the miner in a 'with' statement. - - Args: - exc_type: The type of the exception that caused the context to be exited. - None if the context was exited without an exception. - exc_value: The instance of the exception that caused the context to be exited. - None if the context was exited without an exception. - traceback: A traceback object encoding the stack trace. - None if the context was exited without an exception. 
- """ - self.stop_run_thread() - - def set_weights(self): - """ - Self-assigns a weight of 1 to the current miner (identified by its UID) and - a weight of 0 to all other peers in the network. The weights determine the trust level the miner assigns to other nodes on the network. - - Raises: - Exception: If there's an error while setting weights, the exception is logged for diagnosis. - """ - try: - # --- query the chain for the most current number of peers on the network - chain_weights = torch.zeros( - self.subtensor.subnetwork_n(netuid=self.metagraph.netuid) - ) - chain_weights[self.uid] = 1 - - # --- Set weights. - self.subtensor.set_weights( - wallet=self.wallet, - netuid=self.metagraph.netuid, - uids=torch.arange(0, len(chain_weights)), - weights=chain_weights.to("cpu"), - wait_for_inclusion=False, - version_key=self.spec_version, - ) - - except Exception as e: - bt.logging.error( - f"Failed to set weights on chain with exception: { e }" - ) - - bt.logging.info(f"Set weights: {chain_weights}") - - def resync_metagraph(self): - """Resyncs the metagraph and updates the hotkeys and moving averages based on the new metagraph.""" - bt.logging.info("resync_metagraph()") - - # Sync the metagraph. 
- self.metagraph.sync(subtensor=self.subtensor) diff --git a/template/base/neuron.py b/template/base/neuron.py deleted file mode 100644 index ef2caf0..0000000 --- a/template/base/neuron.py +++ /dev/null @@ -1,168 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Yuma Rao - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import copy -import typing - -import bittensor as bt - -from abc import ABC, abstractmethod - -# Sync calls set weights and also resyncs the metagraph. -from template.utils.config import check_config, add_args, config -from template.utils.misc import ttl_get_block -from template import __spec_version__ as spec_version - - -class BaseNeuron(ABC): - """ - Base class for Bittensor miners. This class is abstract and should be inherited by a subclass. It contains the core logic for all neurons; validators and miners. 
- - In addition to creating a wallet, subtensor, and metagraph, this class also handles the synchronization of the network state via a basic checkpointing mechanism based on epoch length. - """ - - @classmethod - def check_config(cls, config: "bt.Config"): - check_config(cls, config) - - @classmethod - def add_args(cls, parser): - add_args(cls, parser) - - @classmethod - def config(cls): - return config(cls) - - subtensor: "bt.subtensor" - wallet: "bt.wallet" - metagraph: "bt.metagraph" - spec_version: int = spec_version - - @property - def block(self): - return ttl_get_block(self) - - def __init__(self, config=None): - base_config = copy.deepcopy(config or BaseNeuron.config()) - self.config = self.config() - self.config.merge(base_config) - self.check_config(self.config) - - # Set up logging with the provided configuration and directory. - bt.logging(config=self.config, logging_dir=self.config.full_path) - - # If a gpu is required, set the device to cuda:N (e.g. cuda:0) - self.device = self.config.neuron.device - - # Log the configuration for reference. - bt.logging.info(self.config) - - # Build Bittensor objects - # These are core Bittensor classes to interact with the network. - bt.logging.info("Setting up bittensor objects.") - - # The wallet holds the cryptographic key pairs for the miner. - self.wallet = bt.wallet(config=self.config) - bt.logging.info(f"Wallet: {self.wallet}") - - # The subtensor is our connection to the Bittensor blockchain. - self.subtensor = bt.subtensor(config=self.config) - bt.logging.info(f"Subtensor: {self.subtensor}") - - # The metagraph holds the state of the network, letting us know about other validators and miners. - self.metagraph = self.subtensor.metagraph(self.config.netuid) - bt.logging.info(f"Metagraph: {self.metagraph}") - - # Check if the miner is registered on the Bittensor network before proceeding further. - self.check_registered() - - # Each miner gets a unique identity (UID) in the network for differentiation. 
- self.uid = self.metagraph.hotkeys.index( - self.wallet.hotkey.ss58_address - ) - bt.logging.info( - f"Running neuron on subnet: {self.config.netuid} with uid {self.uid} using network: {self.subtensor.chain_endpoint}" - ) - self.step = 0 - - @abstractmethod - async def forward(self, synapse: bt.Synapse) -> bt.Synapse: - ... - - @abstractmethod - def run(self): - ... - - def sync(self): - """ - Wrapper for synchronizing the state of the network for the given miner or validator. - """ - # Ensure miner or validator hotkey is still registered on the network. - self.check_registered() - - if self.should_sync_metagraph(): - self.resync_metagraph() - - if self.should_set_weights(): - self.set_weights() - - # Always save state. - self.save_state() - - def check_registered(self): - # --- Check for registration. - if not self.subtensor.is_hotkey_registered( - netuid=self.config.netuid, - hotkey_ss58=self.wallet.hotkey.ss58_address, - ): - bt.logging.error( - f"Wallet: {self.wallet} is not registered on netuid {self.config.netuid}." - f" Please register the hotkey using `btcli subnets register` before trying again" - ) - exit() - - def should_sync_metagraph(self): - """ - Check if enough epoch blocks have elapsed since the last checkpoint to sync. - """ - return ( - self.block - self.metagraph.last_update[self.uid] - ) > self.config.neuron.epoch_length - - def should_set_weights(self) -> bool: - # Don't set weights on initialization. - if self.step == 0: - return False - - # Check if enough epoch blocks have elapsed since the last epoch. - if self.config.neuron.disable_set_weights: - return False - - # Define appropriate logic for when set weights. - return ( - self.block - self.metagraph.last_update[self.uid] - ) > self.config.neuron.epoch_length - - def save_state(self): - bt.logging.warning( - "save_state() not implemented for this neuron. You can implement this function to save model checkpoints or other useful data." 
- ) - - def load_state(self): - bt.logging.warning( - "load_state() not implemented for this neuron. You can implement this function to load model checkpoints or other useful data." - ) diff --git a/template/base/validator.py b/template/base/validator.py deleted file mode 100644 index 2a67ef7..0000000 --- a/template/base/validator.py +++ /dev/null @@ -1,332 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Yuma Rao -# TODO(developer): Set your name -# Copyright © 2023 - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - - -import copy -import torch -import asyncio -import threading -import bittensor as bt - -from typing import List -from traceback import print_exception - -from template.base.neuron import BaseNeuron - - -class BaseValidatorNeuron(BaseNeuron): - """ - Base class for Bittensor validators. Your validator should inherit from this class. 
- """ - - def __init__(self, config=None): - super().__init__(config=config) - - # Save a copy of the hotkeys to local memory. - self.hotkeys = copy.deepcopy(self.metagraph.hotkeys) - - # Dendrite lets us send messages to other nodes (axons) in the network. - self.dendrite = bt.dendrite(wallet=self.wallet) - bt.logging.info(f"Dendrite: {self.dendrite}") - - # Set up initial scoring weights for validation - bt.logging.info("Building validation weights.") - self.scores = torch.zeros_like(self.metagraph.S, dtype=torch.float32) - - # Init sync with the network. Updates the metagraph. - self.sync() - - # Serve axon to enable external connections. - if not self.config.neuron.axon_off: - self.serve_axon() - else: - bt.logging.warning("axon off, not serving ip to chain.") - - # Create asyncio event loop to manage async tasks. - self.loop = asyncio.get_event_loop() - - # Instantiate runners - self.should_exit: bool = False - self.is_running: bool = False - self.thread: threading.Thread = None - self.lock = asyncio.Lock() - - def serve_axon(self): - """Serve axon to enable external connections.""" - - bt.logging.info("serving ip to chain...") - try: - self.axon = bt.axon(wallet=self.wallet, config=self.config) - - try: - self.subtensor.serve_axon( - netuid=self.config.netuid, - axon=self.axon, - ) - except Exception as e: - bt.logging.error(f"Failed to serve Axon with exception: {e}") - pass - - except Exception as e: - bt.logging.error( - f"Failed to create Axon initialize with exception: {e}" - ) - pass - - async def concurrent_forward(self): - coroutines = [ - self.forward() - for _ in range(self.config.neuron.num_concurrent_forwards) - ] - await asyncio.gather(*coroutines) - - def run(self): - """ - Initiates and manages the main loop for the miner on the Bittensor network. The main loop handles graceful shutdown on keyboard interrupts and logs unforeseen errors. - - This function performs the following primary tasks: - 1. Check for registration on the Bittensor network. 
- 2. Continuously forwards queries to the miners on the network, rewarding their responses and updating the scores accordingly. - 3. Periodically resynchronizes with the chain; updating the metagraph with the latest network state and setting weights. - - The essence of the validator's operations is in the forward function, which is called every step. The forward function is responsible for querying the network and scoring the responses. - - Note: - - The function leverages the global configurations set during the initialization of the miner. - - The miner's axon serves as its interface to the Bittensor network, handling incoming and outgoing requests. - - Raises: - KeyboardInterrupt: If the miner is stopped by a manual interruption. - Exception: For unforeseen errors during the miner's operation, which are logged for diagnosis. - """ - - # Check that validator is registered on the network. - self.sync() - - bt.logging.info( - f"Running validator {self.axon} on network: {self.config.subtensor.chain_endpoint} with netuid: {self.config.netuid}" - ) - - bt.logging.info(f"Validator starting at block: {self.block}") - - # This loop maintains the validator's operations until intentionally stopped. - try: - while True: - bt.logging.info(f"step({self.step}) block({self.block})") - - # Run multiple forwards concurrently. - self.loop.run_until_complete(self.concurrent_forward()) - - # Check if we should exit. - if self.should_exit: - break - - # Sync metagraph and potentially set weights. - self.sync() - - self.step += 1 - - # If someone intentionally stops the validator, it'll safely terminate operations. - except KeyboardInterrupt: - self.axon.stop() - bt.logging.success("Validator killed by keyboard interrupt.") - exit() - - # In case of unforeseen errors, the validator will log the error and continue operations. 
- except Exception as err: - bt.logging.error("Error during validation", str(err)) - bt.logging.debug( - print_exception(type(err), err, err.__traceback__) - ) - - def run_in_background_thread(self): - """ - Starts the validator's operations in a background thread upon entering the context. - This method facilitates the use of the validator in a 'with' statement. - """ - if not self.is_running: - bt.logging.debug("Starting validator in background thread.") - self.should_exit = False - self.thread = threading.Thread(target=self.run, daemon=True) - self.thread.start() - self.is_running = True - bt.logging.debug("Started") - - def stop_run_thread(self): - """ - Stops the validator's operations that are running in the background thread. - """ - if self.is_running: - bt.logging.debug("Stopping validator in background thread.") - self.should_exit = True - self.thread.join(5) - self.is_running = False - bt.logging.debug("Stopped") - - def __enter__(self): - self.run_in_background_thread() - return self - - def __exit__(self, exc_type, exc_value, traceback): - """ - Stops the validator's background operations upon exiting the context. - This method facilitates the use of the validator in a 'with' statement. - - Args: - exc_type: The type of the exception that caused the context to be exited. - None if the context was exited without an exception. - exc_value: The instance of the exception that caused the context to be exited. - None if the context was exited without an exception. - traceback: A traceback object encoding the stack trace. - None if the context was exited without an exception. - """ - if self.is_running: - bt.logging.debug("Stopping validator in background thread.") - self.should_exit = True - self.thread.join(5) - self.is_running = False - bt.logging.debug("Stopped") - - def set_weights(self): - """ - Sets the validator weights to the metagraph hotkeys based on the scores it has received from the miners. 
The weights determine the trust and incentive level the validator assigns to miner nodes on the network. - """ - - # Check if self.scores contains any NaN values and log a warning if it does. - if torch.isnan(self.scores).any(): - bt.logging.warning( - f"Scores contain NaN values. This may be due to a lack of responses from miners, or a bug in your reward functions." - ) - - # Calculate the average reward for each uid across non-zero values. - # Replace any NaN values with 0. - raw_weights = torch.nn.functional.normalize(self.scores, p=1, dim=0) - bt.logging.trace("raw_weights", raw_weights) - bt.logging.trace("top10 values", raw_weights.sort()[0]) - bt.logging.trace("top10 uids", raw_weights.sort()[1]) - - # Process the raw weights to final_weights via subtensor limitations. - ( - processed_weight_uids, - processed_weights, - ) = bt.utils.weight_utils.process_weights_for_netuid( - uids=self.metagraph.uids.to("cpu"), - weights=raw_weights.to("cpu"), - netuid=self.config.netuid, - subtensor=self.subtensor, - metagraph=self.metagraph, - ) - bt.logging.trace("processed_weights", processed_weights) - bt.logging.trace("processed_weight_uids", processed_weight_uids) - - # Set the weights on chain via our subtensor connection. - self.subtensor.set_weights( - wallet=self.wallet, - netuid=self.config.netuid, - uids=processed_weight_uids, - weights=processed_weights, - wait_for_finalization=False, - version_key=self.spec_version, - ) - - bt.logging.info(f"Set weights: {processed_weights}") - - def resync_metagraph(self): - """Resyncs the metagraph and updates the hotkeys and moving averages based on the new metagraph.""" - bt.logging.info("resync_metagraph()") - - # Copies state of metagraph before syncing. - previous_metagraph = copy.deepcopy(self.metagraph) - - # Sync the metagraph. - self.metagraph.sync(subtensor=self.subtensor) - - # Check if the metagraph axon info has changed. 
- if previous_metagraph.axons == self.metagraph.axons: - return - - bt.logging.info( - "Metagraph updated, re-syncing hotkeys, dendrite pool and moving averages" - ) - # Zero out all hotkeys that have been replaced. - for uid, hotkey in enumerate(self.hotkeys): - if hotkey != self.metagraph.hotkeys[uid]: - self.scores[uid] = 0 # hotkey has been replaced - - # Check to see if the metagraph has changed size. - # If so, we need to add new hotkeys and moving averages. - if len(self.hotkeys) < len(self.metagraph.hotkeys): - # Update the size of the moving average scores. - new_moving_average = torch.zeros((self.metagraph.n)).to( - self.device - ) - min_len = min(len(self.hotkeys), len(self.scores)) - new_moving_average[:min_len] = self.scores[:min_len] - self.scores = new_moving_average - - # Update the hotkeys. - self.hotkeys = copy.deepcopy(self.metagraph.hotkeys) - - def update_scores(self, rewards: torch.FloatTensor, uids: List[int]): - """Performs exponential moving average on the scores based on the rewards received from the miners.""" - - # Check if rewards contains NaN values. - if torch.isnan(rewards).any(): - bt.logging.warning(f"NaN values detected in rewards: {rewards}") - # Replace any NaN values in rewards with 0. - rewards = torch.nan_to_num(rewards, 0) - - # Compute forward pass rewards, assumes uids are mutually exclusive. - # shape: [ metagraph.n ] - scattered_rewards: torch.FloatTensor = self.scores.scatter( - 0, torch.tensor(uids).to(self.device), rewards - ).to(self.device) - bt.logging.debug(f"Scattered rewards: {rewards}") - - # Update scores with rewards produced by this step. 
- # shape: [ metagraph.n ] - alpha: float = self.config.neuron.moving_average_alpha - self.scores: torch.FloatTensor = alpha * scattered_rewards + ( - 1 - alpha - ) * self.scores.to(self.device) - bt.logging.debug(f"Updated moving avg scores: {self.scores}") - - def save_state(self): - """Saves the state of the validator to a file.""" - bt.logging.info("Saving validator state.") - - # Save the state of the validator to file. - torch.save( - { - "step": self.step, - "scores": self.scores, - "hotkeys": self.hotkeys, - }, - self.config.neuron.full_path + "/state.pt", - ) - - def load_state(self): - """Loads the state of the validator from a file.""" - bt.logging.info("Loading validator state.") - - # Load the state of the validator from file. - state = torch.load(self.config.neuron.full_path + "/state.pt") - self.step = state["step"] - self.scores = state["scores"] - self.hotkeys = state["hotkeys"] diff --git a/template/protocol.py b/template/protocol.py deleted file mode 100644 index b7c50b9..0000000 --- a/template/protocol.py +++ /dev/null @@ -1,76 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Yuma Rao -# TODO(developer): Set your name -# Copyright © 2023 - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import typing -import bittensor as bt - -# TODO(developer): Rewrite with your protocol definition. - -# This is the protocol for the dummy miner and validator. -# It is a simple request-response protocol where the validator sends a request -# to the miner, and the miner responds with a dummy response. - -# ---- miner ---- -# Example usage: -# def dummy( synapse: Dummy ) -> Dummy: -# synapse.dummy_output = synapse.dummy_input + 1 -# return synapse -# axon = bt.axon().attach( dummy ).serve(netuid=...).start() - -# ---- validator --- -# Example usage: -# dendrite = bt.dendrite() -# dummy_output = dendrite.query( Dummy( dummy_input = 1 ) ) -# assert dummy_output == 2 - - -class Dummy(bt.Synapse): - """ - A simple dummy protocol representation which uses bt.Synapse as its base. - This protocol helps in handling dummy request and response communication between - the miner and the validator. - - Attributes: - - dummy_input: An integer value representing the input request sent by the validator. - - dummy_output: An optional integer value which, when filled, represents the response from the miner. - """ - - # Required request input, filled by sending dendrite caller. - dummy_input: int - - # Optional request output, filled by recieving axon. - dummy_output: typing.Optional[int] = None - - def deserialize(self) -> int: - """ - Deserialize the dummy output. This method retrieves the response from - the miner in the form of dummy_output, deserializes it and returns it - as the output of the dendrite.query() call. - - Returns: - - int: The deserialized response, which in this case is the value of dummy_output. 
- - Example: - Assuming a Dummy instance has a dummy_output value of 5: - >>> dummy_instance = Dummy(dummy_input=4) - >>> dummy_instance.dummy_output = 5 - >>> dummy_instance.deserialize() - 5 - """ - return self.dummy_output diff --git a/template/utils/__init__.py b/template/utils/__init__.py deleted file mode 100644 index 1e61220..0000000 --- a/template/utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from . import config -from . import misc -from . import uids diff --git a/template/utils/config.py b/template/utils/config.py deleted file mode 100644 index c087b5e..0000000 --- a/template/utils/config.py +++ /dev/null @@ -1,177 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import os -import torch -import argparse -import bittensor as bt -from loguru import logger - - -def check_config(cls, config: "bt.Config"): - r"""Checks/validates the config namespace object.""" - bt.logging.check_config(config) - - full_path = os.path.expanduser( - "{}/{}/{}/netuid{}/{}".format( - config.logging.logging_dir, # TODO: change from ~/.bittensor/miners to ~/.bittensor/neurons - config.wallet.name, - config.wallet.hotkey, - config.netuid, - config.neuron.name, - ) - ) - print("full path:", full_path) - config.neuron.full_path = os.path.expanduser(full_path) - if not os.path.exists(config.neuron.full_path): - os.makedirs(config.neuron.full_path, exist_ok=True) - - if not config.neuron.dont_save_events: - # Add custom event logger for the events. - logger.level("EVENTS", no=38, icon="📝") - logger.add( - os.path.join(config.neuron.full_path, "events.log"), - rotation=config.neuron.events_retention_size, - serialize=True, - enqueue=True, - backtrace=False, - diagnose=False, - level="EVENTS", - format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}", - ) - - -def add_args(cls, parser): - """ - Adds relevant arguments to the parser for operation. - """ - # Netuid Arg: The netuid of the subnet to connect to. - parser.add_argument("--netuid", type=int, help="Subnet netuid", default=1) - - neuron_type = ( - "validator" if "miner" not in cls.__name__.lower() else "miner" - ) - - parser.add_argument( - "--neuron.name", - type=str, - help="Trials for this neuron go in neuron.root / (wallet_cold - wallet_hot) / neuron.name. 
", - default=neuron_type, - ) - - parser.add_argument( - "--neuron.device", - type=str, - help="Device to run on.", - default="cpu", - ) - - parser.add_argument( - "--neuron.epoch_length", - type=int, - help="The default epoch length (how often we set weights, measured in 12 second blocks).", - default=100, - ) - - parser.add_argument( - "--neuron.events_retention_size", - type=str, - help="Events retention size.", - default="2 GB", - ) - - parser.add_argument( - "--neuron.dont_save_events", - action="store_true", - help="If set, we dont save events to a log file.", - default=False, - ) - - if neuron_type == "validator": - parser.add_argument( - "--neuron.num_concurrent_forwards", - type=int, - help="The number of concurrent forwards running at any time.", - default=1, - ) - - parser.add_argument( - "--neuron.sample_size", - type=int, - help="The number of miners to query in a single step.", - default=10, - ) - - parser.add_argument( - "--neuron.disable_set_weights", - action="store_true", - help="Disables setting weights.", - default=False, - ) - - parser.add_argument( - "--neuron.moving_average_alpha", - type=float, - help="Moving average alpha parameter, how much to add of the new observation.", - default=0.05, - ) - - parser.add_argument( - "--neuron.axon_off", - "--axon_off", - action="store_true", - # Note: the validator needs to serve an Axon with their IP or they may - # be blacklisted by the firewall of serving peers on the network. 
- help="Set this flag to not attempt to serve an Axon.", - default=False, - ) - - parser.add_argument( - "--neuron.vpermit_tao_limit", - type=int, - help="The maximum number of TAO allowed to query a validator with a vpermit.", - default=4096, - ) - - else: - parser.add_argument( - "--blacklist.force_validator_permit", - action="store_true", - help="If set, we will force incoming requests to have a permit.", - default=False, - ) - - parser.add_argument( - "--blacklist.allow_non_registered", - action="store_true", - help="If set, miners will accept queries from non registered entities. (Dangerous!)", - default=False, - ) - - -def config(cls): - """ - Returns the configuration object specific to this miner or validator after adding relevant arguments. - """ - parser = argparse.ArgumentParser() - bt.wallet.add_args(parser) - bt.subtensor.add_args(parser) - bt.logging.add_args(parser) - bt.axon.add_args(parser) - cls.add_args(parser) - return bt.config(parser) diff --git a/template/utils/misc.py b/template/utils/misc.py deleted file mode 100644 index 80b4e61..0000000 --- a/template/utils/misc.py +++ /dev/null @@ -1,112 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Yuma Rao -# Copyright © 2023 Opentensor Foundation - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. 
- -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import time -import math -import hashlib as rpccheckhealth -from math import floor -from typing import Callable, Any -from functools import lru_cache, update_wrapper - - -# LRU Cache with TTL -def ttl_cache(maxsize: int = 128, typed: bool = False, ttl: int = -1): - """ - Decorator that creates a cache of the most recently used function calls with a time-to-live (TTL) feature. - The cache evicts the least recently used entries if the cache exceeds the `maxsize` or if an entry has - been in the cache longer than the `ttl` period. - - Args: - maxsize (int): Maximum size of the cache. Once the cache grows to this size, subsequent entries - replace the least recently used ones. Defaults to 128. - typed (bool): If set to True, arguments of different types will be cached separately. For example, - f(3) and f(3.0) will be treated as distinct calls with distinct results. Defaults to False. - ttl (int): The time-to-live for each cache entry, measured in seconds. If set to a non-positive value, - the TTL is set to a very large number, effectively making the cache entries permanent. Defaults to -1. - - Returns: - Callable: A decorator that can be applied to functions to cache their return values. - - The decorator is useful for caching results of functions that are expensive to compute and are called - with the same arguments frequently within short periods of time. The TTL feature helps in ensuring - that the cached values are not stale. 
- - Example: - @ttl_cache(ttl=10) - def get_data(param): - # Expensive data retrieval operation - return data - """ - if ttl <= 0: - ttl = 65536 - hash_gen = _ttl_hash_gen(ttl) - - def wrapper(func: Callable) -> Callable: - @lru_cache(maxsize, typed) - def ttl_func(ttl_hash, *args, **kwargs): - return func(*args, **kwargs) - - def wrapped(*args, **kwargs) -> Any: - th = next(hash_gen) - return ttl_func(th, *args, **kwargs) - - return update_wrapper(wrapped, func) - - return wrapper - - -def _ttl_hash_gen(seconds: int): - """ - Internal generator function used by the `ttl_cache` decorator to generate a new hash value at regular - time intervals specified by `seconds`. - - Args: - seconds (int): The number of seconds after which a new hash value will be generated. - - Yields: - int: A hash value that represents the current time interval. - - This generator is used to create time-based hash values that enable the `ttl_cache` to determine - whether cached entries are still valid or if they have expired and should be recalculated. - """ - start_time = time.time() - while True: - yield floor((time.time() - start_time) / seconds) - - -# 12 seconds updating block. -@ttl_cache(maxsize=1, ttl=12) -def ttl_get_block(self) -> int: - """ - Retrieves the current block number from the blockchain. This method is cached with a time-to-live (TTL) - of 12 seconds, meaning that it will only refresh the block number from the blockchain at most every 12 seconds, - reducing the number of calls to the underlying blockchain interface. - - Returns: - int: The current block number on the blockchain. - - This method is useful for applications that need to access the current block number frequently and can - tolerate a delay of up to 12 seconds for the latest information. By using a cache with TTL, the method - efficiently reduces the workload on the blockchain interface. 
- - Example: - current_block = ttl_get_block(self) - - Note: self here is the miner or validator instance - """ - return self.subtensor.get_current_block() diff --git a/template/utils/uids.py b/template/utils/uids.py deleted file mode 100644 index ce78c80..0000000 --- a/template/utils/uids.py +++ /dev/null @@ -1,63 +0,0 @@ -import torch -import random -import bittensor as bt -from typing import List - - -def check_uid_availability( - metagraph: "bt.metagraph.Metagraph", uid: int, vpermit_tao_limit: int -) -> bool: - """Check if uid is available. The UID should be available if it is serving and has less than vpermit_tao_limit stake - Args: - metagraph (:obj: bt.metagraph.Metagraph): Metagraph object - uid (int): uid to be checked - vpermit_tao_limit (int): Validator permit tao limit - Returns: - bool: True if uid is available, False otherwise - """ - # Filter non serving axons. - if not metagraph.axons[uid].is_serving: - return False - # Filter validator permit > 1024 stake. - if metagraph.validator_permit[uid]: - if metagraph.S[uid] > vpermit_tao_limit: - return False - # Available otherwise. - return True - - -def get_random_uids( - self, k: int, exclude: List[int] = None -) -> torch.LongTensor: - """Returns k available random uids from the metagraph. - Args: - k (int): Number of uids to return. - exclude (List[int]): List of uids to exclude from the random sampling. - Returns: - uids (torch.LongTensor): Randomly sampled available uids. - Notes: - If `k` is larger than the number of available `uids`, set `k` to the number of available `uids`. 
- """ - candidate_uids = [] - avail_uids = [] - - for uid in range(self.metagraph.n.item()): - uid_is_available = check_uid_availability( - self.metagraph, uid, self.config.neuron.vpermit_tao_limit - ) - uid_is_not_excluded = exclude is None or uid not in exclude - - if uid_is_available: - avail_uids.append(uid) - if uid_is_not_excluded: - candidate_uids.append(uid) - - # Check if candidate_uids contain enough for querying, if not grab all avaliable uids - available_uids = candidate_uids - if len(candidate_uids) < k: - available_uids += random.sample( - [uid for uid in avail_uids if uid not in candidate_uids], - k - len(candidate_uids), - ) - uids = torch.tensor(random.sample(available_uids, k)) - return uids diff --git a/template/validator/__init__.py b/template/validator/__init__.py deleted file mode 100644 index e43fa85..0000000 --- a/template/validator/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .forward import forward -from .reward import reward diff --git a/template/validator/forward.py b/template/validator/forward.py deleted file mode 100644 index 8ced17f..0000000 --- a/template/validator/forward.py +++ /dev/null @@ -1,61 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Yuma Rao -# TODO(developer): Set your name -# Copyright © 2023 - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. 
- -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import bittensor as bt - -from template.protocol import Dummy -from template.validator.reward import get_rewards -from template.utils.uids import get_random_uids - - -async def forward(self): - """ - The forward function is called by the validator every time step. - - It is responsible for querying the network and scoring the responses. - - Args: - self (:obj:`bittensor.neuron.Neuron`): The neuron object which contains all the necessary state for the validator. - - """ - # TODO(developer): Define how the validator selects a miner to query, how often, etc. - # get_random_uids is an example method, but you can replace it with your own. - miner_uids = get_random_uids(self, k=self.config.neuron.sample_size) - - # The dendrite client queries the network. - responses = self.dendrite.query( - # Send the query to selected miner axons in the network. - axons=[self.metagraph.axons[uid] for uid in miner_uids], - # Construct a dummy query. This simply contains a single integer. - synapse=Dummy(dummy_input=self.step), - # All responses have the deserialize function called on them before returning. - # You are encouraged to define your own deserialization function. - deserialize=True, - ) - - # Log the results for monitoring purposes. - bt.logging.info(f"Received responses: {responses}") - - # TODO(developer): Define how the validator scores responses. - # Adjust the scores based on responses from miners. 
- rewards = get_rewards(self, query=self.step, responses=responses) - - bt.logging.info(f"Scored responses: {rewards}") - # Update the scores based on the rewards. You may want to define your own update_scores function for custom behavior. - self.update_scores(rewards, miner_uids) diff --git a/template/validator/reward.py b/template/validator/reward.py deleted file mode 100644 index ab2d435..0000000 --- a/template/validator/reward.py +++ /dev/null @@ -1,54 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2023 Yuma Rao -# TODO(developer): Set your name -# Copyright © 2023 - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import torch -from typing import List - - -def reward(query: int, response: int) -> float: - """ - Reward the miner response to the dummy request. This method returns a reward - value for the miner, which is used to update the miner's score. - - Returns: - - float: The reward value for the miner. 
- """ - - return 1.0 if response == query * 2 else 0 - - -def get_rewards( - self, - query: int, - responses: List[float], -) -> torch.FloatTensor: - """ - Returns a tensor of rewards for the given query and responses. - - Args: - - query (int): The query sent to the miner. - - responses (List[float]): A list of responses from the miner. - - Returns: - - torch.FloatTensor: A tensor of rewards for the given query and responses. - """ - # Get all the reward results by iteratively calling your reward() function. - return torch.FloatTensor( - [reward(query, response) for response in responses] - ).to(self.device) diff --git a/tests/test_template_validator.py b/tests/test_template_validator.py index 5d0110a..d05d797 100644 --- a/tests/test_template_validator.py +++ b/tests/test_template_validator.py @@ -24,11 +24,11 @@ from neurons.validator import Neuron as Validator from neurons.miner import Neuron as Miner -from template.protocol import Dummy -from template.validator.forward import forward -from template.utils.uids import get_random_uids -from template.validator.reward import get_rewards -from template.base.validator import BaseValidatorNeuron +from ocr_subnet.protocol import Dummy +from ocr_subnet.validator.forward import forward +from ocr_subnet.utils.uids import get_random_uids +from ocr_subnet.validator.reward import get_rewards +from ocr_subnet.base.validator import BaseValidatorNeuron class TemplateValidatorNeuronTestCase(unittest.TestCase):