/*	$OpenBSD: nvmevar.h,v 1.28 2021/08/29 12:02:52 kettenis Exp $ */

/*
 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* queue identifiers used when creating the I/O and hibernate queues */
#define NVME_IO_Q	1
#define NVME_HIB_Q	2

/* largest transfer the driver will issue per command */
#define NVME_MAXPHYS	(128 * 1024)

/*
 * A single-segment DMA-safe allocation, mapped for both CPU (kva) and
 * device (dva) access.  Used for queues, PRP lists and identify data.
 */
struct nvme_dmamem {
	bus_dmamap_t		ndm_map;
	bus_dma_segment_t	ndm_seg;
	size_t			ndm_size;
	caddr_t			ndm_kva;
};
#define NVME_DMA_MAP(_ndm)	((_ndm)->ndm_map)
#define NVME_DMA_LEN(_ndm)	((_ndm)->ndm_map->dm_segs[0].ds_len)
#define NVME_DMA_DVA(_ndm)	((u_int64_t)(_ndm)->ndm_map->dm_segs[0].ds_addr)
#define NVME_DMA_KVA(_ndm)	((void *)(_ndm)->ndm_kva)
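
/*
 * Illustrative sketch, not taken from nvme.c: a consumer would typically
 * allocate a nvme_dmamem, touch it through NVME_DMA_KVA(), hand
 * NVME_DMA_DVA() to the controller and sync around the DMA, e.g.:
 *
 *	struct nvme_dmamem *mem;
 *
 *	mem = nvme_dmamem_alloc(sc, sc->sc_mps);
 *	if (mem == NULL)
 *		return (ENOMEM);
 *	memset(NVME_DMA_KVA(mem), 0, NVME_DMA_LEN(mem));
 *	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *	... point the command at NVME_DMA_DVA(mem) ...
 *	nvme_dmamem_sync(sc, mem, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 *	nvme_dmamem_free(sc, mem);
 */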

struct nvme_softc;
struct nvme_queue;

/*
 * Per-command context: a DMA map for the data transfer, the completion
 * callback and its cookie, and this command's slice of the shared PRP
 * list memory.  ccb_id is used as the command identifier in the SQE.
 */
struct nvme_ccb {
	SIMPLEQ_ENTRY(nvme_ccb)	ccb_entry;

	bus_dmamap_t		ccb_dmamap;

	void			*ccb_cookie;
	void			(*ccb_done)(struct nvme_softc *sc,
				    struct nvme_ccb *, struct nvme_cqe *);

	bus_addr_t		ccb_prpl_off;
	u_int64_t		ccb_prpl_dva;
	u_int64_t		*ccb_prpl;

	u_int16_t		ccb_id;
};
SIMPLEQ_HEAD(nvme_ccb_list, nvme_ccb);
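
/*
 * A submission/completion queue pair.  q_sqtdbl and q_cqhdbl are the
 * register offsets of this queue's tail and head doorbells, and
 * q_cq_phase holds the phase bit expected of valid completion entries.
 */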
struct nvme_queue {
	struct mutex		q_sq_mtx;
	struct mutex		q_cq_mtx;
	struct nvme_dmamem	*q_sq_dmamem;
	struct nvme_dmamem	*q_cq_dmamem;
	struct nvme_dmamem	*q_nvmmu_dmamem; /* for aplns(4) */
	bus_size_t		q_sqtdbl; /* submission queue tail doorbell */
	bus_size_t		q_cqhdbl; /* completion queue head doorbell */
	u_int16_t		q_id;
	u_int32_t		q_entries;
	u_int32_t		q_sq_tail;
	u_int32_t		q_cq_head;
	u_int16_t		q_cq_phase;
};

struct nvme_namespace {
	struct nvm_identify_namespace *ident;
};

/*
 * Bus-specific hooks.  Front-ends with non-standard queue or doorbell
 * handling (e.g. aplns(4)) install their own vector.
 */
struct nvme_ops {
	void		(*op_enable)(struct nvme_softc *);

	int		(*op_q_alloc)(struct nvme_softc *,
			    struct nvme_queue *);
	void		(*op_q_free)(struct nvme_softc *,
			    struct nvme_queue *);

	uint32_t	(*op_sq_enter)(struct nvme_softc *,
			    struct nvme_queue *, struct nvme_ccb *);
	void		(*op_sq_leave)(struct nvme_softc *,
			    struct nvme_queue *, struct nvme_ccb *);
	uint32_t	(*op_sq_enter_locked)(struct nvme_softc *,
			    struct nvme_queue *, struct nvme_ccb *);
	void		(*op_sq_leave_locked)(struct nvme_softc *,
			    struct nvme_queue *, struct nvme_ccb *);

	void		(*op_cq_done)(struct nvme_softc *,
			    struct nvme_queue *, struct nvme_ccb *);
};
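
/*
 * Illustrative sketch, not from the tree: a bus front-end with its own
 * queue handling (the names below are hypothetical) could hang an ops
 * vector off the softc before calling nvme_attach(), e.g.:
 *
 *	static const struct nvme_ops foo_nvme_ops = {
 *		.op_enable		= foo_nvme_enable,
 *		.op_q_alloc		= foo_nvme_q_alloc,
 *		.op_q_free		= foo_nvme_q_free,
 *		.op_sq_enter		= foo_nvme_sq_enter,
 *		.op_sq_leave		= foo_nvme_sq_leave,
 *		.op_sq_enter_locked	= foo_nvme_sq_enter_locked,
 *		.op_sq_leave_locked	= foo_nvme_sq_leave_locked,
 *		.op_cq_done		= foo_nvme_cq_done,
 *	};
 *
 *	sc->sc_ops = &foo_nvme_ops;
 */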

/*
 * Per-controller state.
 */
struct nvme_softc {
	struct device		sc_dev;

	const struct nvme_ops	*sc_ops;
	u_int			sc_openings;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;

	u_int			sc_rdy_to;	/* ready timeout, from CAP.TO */
	size_t			sc_mps;		/* memory page size */
	size_t			sc_mdts;	/* max data transfer size (bytes) */
	u_int			sc_max_prpl;	/* max PRP list entries per xfer */
	u_int			sc_dstrd;	/* doorbell stride */

	struct nvm_identify_controller
				sc_identify;

	u_int			sc_nn;		/* number of namespaces */

	struct nvme_namespace	*sc_namespaces;

	struct nvme_queue	*sc_admin_q;
	struct nvme_queue	*sc_q;		/* I/O queue */
	struct nvme_queue	*sc_hib_q;	/* hibernate queue */

	struct mutex		sc_ccb_mtx;
	struct nvme_ccb		*sc_ccbs;
	struct nvme_ccb_list	sc_ccb_list;
	struct nvme_dmamem	*sc_ccb_prpls;	/* PRP lists for all ccbs */
	struct scsi_iopool	sc_iopool;
};

#define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)

int	nvme_attach(struct nvme_softc *);
int	nvme_activate(struct nvme_softc *, int);
int	nvme_intr(void *);
int	nvme_intr_intx(void *);

#define nvme_read4(_s, _r) \
	bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
#define nvme_write4(_s, _r, _v) \
	bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
u_int64_t
	nvme_read8(struct nvme_softc *, bus_size_t);
void	nvme_write8(struct nvme_softc *, bus_size_t, u_int64_t);
#define nvme_barrier(_s, _r, _l, _f) \
	bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_f))
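
/*
 * Illustrative sketch, not lifted from nvme.c: controller registers and
 * doorbells are accessed through the wrappers above.  For example,
 * posting a new tail index to a submission queue doorbell could look
 * like:
 *
 *	nvme_write4(sc, q->q_sqtdbl, q->q_sq_tail);
 */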

struct nvme_dmamem *
	nvme_dmamem_alloc(struct nvme_softc *, size_t);
void	nvme_dmamem_free(struct nvme_softc *, struct nvme_dmamem *);
void	nvme_dmamem_sync(struct nvme_softc *, struct nvme_dmamem *, int);