in sbp/sbp_target.c [258:475]
static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

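	/* map the LUN named in the login ORB to a LUN on this TPG */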
	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

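	/* identify the initiator by its 64-bit GUID */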
	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

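	/* re-use any existing session for this initiator GUID */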
	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its
			 * own LOGIN rather than a RECONNECT. Returning
			 * ACCESS_DENIED would leave the machine waiting for
			 * the reconnect_hold to expire, so skip it and treat
			 * the LOGIN as a reconnect instead.
			 */
			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
	    tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

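	/*
	 * no session for this GUID yet: create one and kick off its
	 * maintenance work
	 */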
	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

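		/* record the initiator's node address, generation and speed */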
		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/*
	 * only take the latest reconnect_hold into account: the requested
	 * hold is 2^reconnect seconds, capped at the tport maximum, and
	 * stored as that value minus one
	 */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");
		sbp_session_release(sess, true);
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

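	/* fill in the login descriptor for this LUN */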
	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

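	/*
	 * map a command block agent for this login into the FireWire
	 * address space
	 */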
	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);
		sbp_session_release(sess, true);
		kfree(login);
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

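	/* publish the new login on the session's login list */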
	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
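	/* send a login response block, for both fresh and repeated logins */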
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");
		sbp_login_release(login, true);
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
		LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
		12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

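	/* write the response block back to the address given in the ORB */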
	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);
		kfree(response);
		sbp_login_release(login, true);
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);
	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}