QuickStart

Install the latest CityLearn version from PyPI with the :code:`pip` command:

[ ]:
pip install CityLearn

Centralized RBC

Run the following to simulate an environment controlled by a centralized RBC agent for a single episode:

[9]:
from citylearn.citylearn import CityLearnEnv
from citylearn.agents.rbc import BasicRBC as RBCAgent

# Simulate one episode of the 2022 challenge dataset under a single
# centralized rule-based controller that acts for every building.
dataset_name = 'citylearn_challenge_2022_phase_1'
env = CityLearnEnv(dataset_name, central_agent=True)
agents = RBCAgent(
    action_space=env.action_space,
    observation_space=env.observation_space,
    building_information=env.get_building_information(),
    observation_names=env.observation_names,
)
observations = env.reset()

# Roll the environment forward until the episode terminates.
while not env.done:
    # select actions from the current observations and apply them
    observations, rewards, _, _ = env.step(agents.select_actions(observations))

# Summarize the cost functions per building and for the whole district.
for name, group in env.evaluate().groupby('name'):
    table = group.pivot(index='name', columns='cost_function', values='value').round(3)
    print(name, ':', table.to_dict('records'))
Building_1 : [{'carbon_emissions': 1.134, 'electricity_consumption': 1.184, 'pricing': 1.043, 'zero_net_energy': 1.118}]
Building_2 : [{'carbon_emissions': 1.158, 'electricity_consumption': 1.215, 'pricing': 1.063, 'zero_net_energy': 1.101}]
Building_3 : [{'carbon_emissions': 1.272, 'electricity_consumption': 1.346, 'pricing': 1.145, 'zero_net_energy': 1.294}]
Building_4 : [{'carbon_emissions': 1.181, 'electricity_consumption': 1.237, 'pricing': 1.097, 'zero_net_energy': 1.085}]
Building_5 : [{'carbon_emissions': 1.186, 'electricity_consumption': 1.262, 'pricing': 1.075, 'zero_net_energy': 1.145}]
District : [{'1 - load_factor': 0.987, 'average_daily_peak': 1.15, 'carbon_emissions': 1.186, 'electricity_consumption': 1.249, 'peak_demand': 1.052, 'pricing': 1.085, 'ramping': 1.162, 'zero_net_energy': 1.148}]

Decentralized-Independent SAC

Run the following to simulate an environment controlled by decentralized-independent SAC agents for 1 training episode:

[10]:
from citylearn.citylearn import CityLearnEnv
from citylearn.agents.sac import SAC as RLAgent

# Train decentralized-independent SAC agents (one policy per building)
# on the 2022 challenge dataset.
dataset_name = 'citylearn_challenge_2022_phase_1'
env = CityLearnEnv(dataset_name)
agents = RLAgent(
    action_space=env.action_space,
    observation_space=env.observation_space,
    building_information=env.get_building_information(),
    observation_names=env.observation_names,
)
episodes = 1 # number of training episodes

# train agents
for e in range(episodes):
    observations = env.reset()

    while not env.done:
        actions = agents.select_actions(observations)
        # apply actions to env
        next_observations, rewards, _, _ = env.step(actions)

        # feed the transition back so the policies can update
        agents.add_to_buffer(observations, actions, rewards, next_observations, done=env.done)
        observations = list(next_observations)

    print(f'Episode: {e}')

    # cost functions at the end of the episode, per building and district-wide
    for name, group in env.evaluate().groupby('name'):
        table = group.pivot(index='name', columns='cost_function', values='value').round(3)
        print(name, ':', table.to_dict('records'))
Episode: 0
Building_1 : [{'carbon_emissions': 1.262, 'electricity_consumption': 1.277, 'pricing': 1.227, 'zero_net_energy': 1.195}]
Building_2 : [{'carbon_emissions': 1.303, 'electricity_consumption': 1.321, 'pricing': 1.273, 'zero_net_energy': 1.163}]
Building_3 : [{'carbon_emissions': 1.514, 'electricity_consumption': 1.534, 'pricing': 1.477, 'zero_net_energy': 1.474}]
Building_4 : [{'carbon_emissions': 1.399, 'electricity_consumption': 1.394, 'pricing': 1.406, 'zero_net_energy': 1.134}]
Building_5 : [{'carbon_emissions': 1.469, 'electricity_consumption': 1.494, 'pricing': 1.425, 'zero_net_energy': 1.224}]
District : [{'1 - load_factor': 1.007, 'average_daily_peak': 1.33, 'carbon_emissions': 1.389, 'electricity_consumption': 1.404, 'peak_demand': 1.343, 'pricing': 1.361, 'ramping': 2.178, 'zero_net_energy': 1.238}]

Decentralized-Cooperative MARLISA

Run the following to simulate an environment controlled by decentralized-cooperative MARLISA agents for 1 training episode:

[11]:
from citylearn.citylearn import CityLearnEnv
from citylearn.agents.marlisa import MARLISA as RLAgent

# Train decentralized-cooperative MARLISA agents (per-building policies
# that share information) on the 2022 challenge dataset.
dataset_name = 'citylearn_challenge_2022_phase_1'
env = CityLearnEnv(dataset_name)
agents = RLAgent(
    action_space=env.action_space,
    observation_space=env.observation_space,
    building_information=env.get_building_information(),
    observation_names=env.observation_names,
)
episodes = 1 # number of training episodes

# train agents
for e in range(episodes):
    observations = env.reset()

    while not env.done:
        actions = agents.select_actions(observations)
        # apply actions to env
        next_observations, rewards, _, _ = env.step(actions)

        # feed the transition back so the policies can update
        agents.add_to_buffer(observations, actions, rewards, next_observations, done=env.done)
        observations = list(next_observations)

    print(f'Episode: {e}')

    # cost functions at the end of the episode, per building and district-wide
    for name, group in env.evaluate().groupby('name'):
        table = group.pivot(index='name', columns='cost_function', values='value').round(3)
        print(name, ':', table.to_dict('records'))
Episode: 0
Building_1 : [{'carbon_emissions': 1.271, 'electricity_consumption': 1.283, 'pricing': 1.236, 'zero_net_energy': 1.184}]
Building_2 : [{'carbon_emissions': 1.316, 'electricity_consumption': 1.33, 'pricing': 1.289, 'zero_net_energy': 1.158}]
Building_3 : [{'carbon_emissions': 1.524, 'electricity_consumption': 1.537, 'pricing': 1.511, 'zero_net_energy': 1.458}]
Building_4 : [{'carbon_emissions': 1.394, 'electricity_consumption': 1.395, 'pricing': 1.392, 'zero_net_energy': 1.131}]
Building_5 : [{'carbon_emissions': 1.481, 'electricity_consumption': 1.501, 'pricing': 1.441, 'zero_net_energy': 1.222}]
District : [{'1 - load_factor': 1.005, 'average_daily_peak': 1.342, 'carbon_emissions': 1.397, 'electricity_consumption': 1.409, 'peak_demand': 1.232, 'pricing': 1.374, 'ramping': 2.201, 'zero_net_energy': 1.23}]