/* $OpenBSD: asc_ioasic.c,v 1.4 1998/05/18 00:25:08 millert Exp $ */
/* $NetBSD: asc_ioasic.c,v 1.12 1998/01/12 09:51:30 thorpej Exp $ */
/*
* Copyright 1996 The Board of Trustees of The Leland Stanford
* Junior University. All Rights Reserved.
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies. Stanford University
* makes no representations about the suitability of this
* software for any purpose. It is provided "as is" without
* express or implied warranty.
*
*/
#define USE_CACHED_BUFFER 0
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <dev/tc/tcvar.h>
#include <dev/tc/ioasicvar.h>
#include <machine/autoconf.h>
#include <pmax/dev/device.h> /* XXX */
#include <pmax/dev/scsi.h> /* XXX */
#include <pmax/dev/ascreg.h> /* XXX */
#include <dev/tc/ascvar.h>
#include <machine/cpu.h>
#include <machine/bus.h> /* bus, cache consistency, etc */
/*XXX*/
#include <pmax/pmax/asic.h> /* XXX ioasic register defs? */
#include <pmax/pmax/kmin.h> /* XXX ioasic register defs? */
#include <pmax/pmax/pmaxtype.h>
extern int pmax_boardtype;
extern vm_offset_t kvtophys __P((vm_offset_t));
/*
* Autoconfiguration data for config.
*/
int asc_ioasic_match __P((struct device *, void *, void *));
void asc_ioasic_attach __P((struct device *, struct device *, void *));
struct cfattach asc_ioasic_ca = {
sizeof(struct asc_softc), asc_ioasic_match, asc_ioasic_attach
};
#ifdef notdef
extern struct cfdriver asc_cd;
#endif
/*
* DMA callback declarations
*/
#ifdef ASC_IOASIC_BOUNCE
extern u_long asc_iomem;
#endif
static int
asic_dma_start __P((asc_softc_t asc, State *state, caddr_t cp, int flag,
int len, int off));
static void
asic_dma_end __P((asc_softc_t asc, State *state, int flag));
int
asc_ioasic_match(parent, match, aux)
struct device *parent;
void *match;
void *aux;
{
struct ioasicdev_attach_args *d = aux;
void *ascaddr;
if (strncmp(d->iada_modname, "asc", TC_ROM_LLEN) &&
strncmp(d->iada_modname, "PMAZ-AA ", TC_ROM_LLEN))
return (0);
/* probe for chip */
ascaddr = (void*)d->iada_addr;
if (tc_badaddr(ascaddr + ASC_OFFSET_53C94))
return (0);
return (1);
}
void
asc_ioasic_attach(parent, self, aux)
struct device *parent;
struct device *self;
void *aux;
{
register struct ioasicdev_attach_args *d = aux;
register asc_softc_t asc = (asc_softc_t) self;
#ifdef ASC_IOASIC_BOUNCE
u_char *buff;
int i;
#endif
void *ascaddr;
int unit;
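	/* Map the chip through KSEG1 so that register accesses are uncached. */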
ascaddr = (void*)MIPS_PHYS_TO_KSEG1(d->iada_addr);
unit = asc->sc_dev.dv_unit;
/*
* Initialize hw descriptor, cache some pointers
*/
asc->regs = (asc_regmap_t *)(ascaddr + ASC_OFFSET_53C94);
/*
* Set up machine dependencies.
* (1) how to do dma
* (2) timing based on turbochannel frequency
*/
#ifdef ASC_IOASIC_BOUNCE
#if USE_CACHED_BUFFER
/* XXX Use cached address for DMA buffer to increase raw read speed */
buff = (u_char *)MIPS_PHYS_TO_KSEG0(asc_iomem);
#else
buff = (u_char *)MIPS_PHYS_TO_KSEG1(asc_iomem);
#endif /* USE_CACHED_BUFFER */
/*
* Statically partition the DMA buffer between targets.
 * This way we will eventually be able to attach/detach
 * drives on the fly, and the 8k region per target is plenty for normal use.
*/
/*
* Give each target its own DMA buffer region.
* We may want to try ping ponging buffers later.
*/
for (i = 0; i < ASC_NCMD; i++)
asc->st[i].dmaBufAddr = buff + 8192 * i;
#endif /* ASC_IOASIC_BOUNCE */
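	/*
	 * Reset the IOASIC SCSI DMA engine: invalidate both DMA page
	 * pointers and clear the count register before attaching the
	 * 53c94 core and hooking up the interrupt.
	 */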
*((volatile int *)IOASIC_REG_SCSI_DMAPTR(ioasic_base)) = -1;
*((volatile int *)IOASIC_REG_SCSI_DMANPTR(ioasic_base)) = -1;
*((volatile int *)IOASIC_REG_SCSI_SCR(ioasic_base)) = 0;
asc->dma_start = asic_dma_start;
asc->dma_end = asic_dma_end;
/* digital meters show IOASIC 53c94s are clocked at approx 25MHz */
ascattach(asc, ASC_SPEED_25_MHZ);
/* tie pseudo-slot to device */
ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_BIO,
asc_intr, asc);
}
/*
 * DMA handling routines. For a TurboChannel option card it is enough
 * to set the DMAR; for the I/O ASIC we must drive the IOASIC DMA
 * engine itself, which is what the routines below do.
 */
static int
asic_dma_start(asc, state, cp, flag, len, off)
asc_softc_t asc;
State *state;
caddr_t cp;
int flag;
int len;
int off;
{
register volatile u_int *ssr = (volatile u_int *)
IOASIC_REG_CSR(ioasic_base);
u_int phys, nphys;
/* stop DMA engine first */
*ssr &= ~IOASIC_CSR_DMAEN_SCSI;
*((volatile int *)IOASIC_REG_SCSI_SCR(ioasic_base)) = 0;
#ifndef ASC_IOASIC_BOUNCE
/* restrict len to the maximum the IOASIC can transfer */
if (len > ((caddr_t)mips_trunc_page(cp + NBPG * 2) - cp))
len = (caddr_t)mips_trunc_page(cp + NBPG * 2) - cp;
/* If R4K, writeback and invalidate the buffer */
if (CPUISMIPS3)
mips3_HitFlushDCache((vm_offset_t)cp, len);
/* Get physical address of buffer start, no next phys addr */
phys = (u_int)kvtophys((vm_offset_t)cp);
nphys = -1;
/* Compute 2nd DMA pointer only if next page is part of this I/O */
if ((NBPG - (phys & (NBPG - 1))) < len) {
cp = (caddr_t)mips_trunc_page(cp + NBPG);
nphys = (u_int)kvtophys((vm_offset_t)cp);
}
/* If not R4K, need to invalidate cache lines for both physical segments */
if (!CPUISMIPS3 && flag == ASCDMA_READ) {
MachFlushDCache(MIPS_PHYS_TO_KSEG0(phys),
nphys == 0xffffffff ? len : NBPG - (phys & (NBPG - 1)));
if (nphys != 0xffffffff)
MachFlushDCache(MIPS_PHYS_TO_KSEG0(nphys),
NBPG); /* XXX */
}
#else /* ASC_IOASIC_BOUNCE */
/* restrict len to the maximum the IOASIC can transfer */
if (len > ((caddr_t)mips_trunc_page(state->dmaBufAddr + off + NBPG * 2) - (caddr_t)(state->dmaBufAddr + off)))
len = (caddr_t)mips_trunc_page(state->dmaBufAddr + off + NBPG * 2) - (caddr_t)(state->dmaBufAddr + off);
if (flag == ASCDMA_WRITE)
bcopy(cp, state->dmaBufAddr + off, len);
cp = state->dmaBufAddr + off;
#if USE_CACHED_BUFFER
#ifdef MIPS3
/* If R4K, need to writeback the bounce buffer */
if (CPUISMIPS3)
mips3_HitFlushDCache((vm_offset_t)cp, len);
#endif /* MIPS3 */
phys = MIPS_KSEG0_TO_PHYS(cp);
cp = (caddr_t)mips_trunc_page(cp + NBPG);
nphys = MIPS_KSEG0_TO_PHYS(cp);
#else
phys = MIPS_KSEG1_TO_PHYS(cp);
cp = (caddr_t)mips_trunc_page(cp + NBPG);
nphys = MIPS_KSEG1_TO_PHYS(cp);
#endif /* USE_CACHED_BUFFER */
#endif /* ASC_IOASIC_BOUNCE */
#ifdef notyet
asc->dma_next = cp;
asc->dma_xfer = state->dmalen - (nphys - phys);
#endif
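	/*
	 * Hand both page pointers to the IOASIC and start the engine,
	 * with the direction bit set for device-to-memory (read) transfers.
	 */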
*(volatile int *)IOASIC_REG_SCSI_DMAPTR(ioasic_base) =
IOASIC_DMA_ADDR(phys);
*(volatile int *)IOASIC_REG_SCSI_DMANPTR(ioasic_base) =
IOASIC_DMA_ADDR(nphys);
if (flag == ASCDMA_READ)
*ssr |= IOASIC_CSR_SCSI_DIR | IOASIC_CSR_DMAEN_SCSI;
else
*ssr = (*ssr & ~IOASIC_CSR_SCSI_DIR) | IOASIC_CSR_DMAEN_SCSI;
wbflush();
return (len);
}
static void
asic_dma_end(asc, state, flag)
asc_softc_t asc;
State *state;
int flag;
{
register volatile u_int *ssr = (volatile u_int *)
IOASIC_REG_CSR(ioasic_base);
register volatile u_int *dmap = (volatile u_int *)
IOASIC_REG_SCSI_DMAPTR(ioasic_base);
register u_short *to;
register int w;
int nb;
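	/*
	 * Stop the DMA engine and note where its pointer stopped, so that
	 * any trailing bytes still held by the IOASIC can be copied out
	 * by hand below.
	 */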
*ssr &= ~IOASIC_CSR_DMAEN_SCSI;
#if USE_CACHED_BUFFER /* XXX - Should uncached address always be used? */
to = (u_short *)MIPS_PHYS_TO_KSEG0(*dmap >> 3);
#else
to = (u_short *)MIPS_PHYS_TO_KSEG1(*dmap >> 3);
#endif
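	/* Invalidate the DMA pointers now that the tail address is saved. */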
*dmap = -1;
*((volatile int *)IOASIC_REG_SCSI_DMANPTR(ioasic_base)) = -1;
wbflush();
if (flag == ASCDMA_READ) {
#if !defined(ASC_IOASIC_BOUNCE) && USE_CACHED_BUFFER
/* Invalidate cache for the buffer */
#ifdef MIPS3
if (CPUISMIPS3)
MachFlushDCache(MIPS_KSEG1_TO_PHYS(state->dmaBufAddr),
state->dmalen);
else
#endif /* MIPS3 */
MachFlushDCache(MIPS_PHYS_TO_KSEG0(
MIPS_KSEG1_TO_PHYS(state->dmaBufAddr)),
state->dmalen);
#endif /* USE_CACHED_BUFFER */
if ((nb = *((int *)IOASIC_REG_SCSI_SCR(ioasic_base))) != 0) {
/*
 * Pick up the last few (up to 6) bytes, sigh: they are still
 * sitting in the IOASIC SCSI data registers and never made it
 * out to memory.
 */
w = *(int *)IOASIC_REG_SCSI_SDR0(ioasic_base);
*to++ = w;
if (--nb > 0) {
w >>= 16;
*to++ = w;
}
if (--nb > 0) {
w = *(int *)IOASIC_REG_SCSI_SDR1(ioasic_base);
*to++ = w;
}
}
#ifdef ASC_IOASIC_BOUNCE
bcopy(state->dmaBufAddr, state->buf, state->dmalen);
#endif
}
}
#ifdef notdef
/*
* Called by asic_intr() for scsi dma pointer update interrupts.
*/
void
asc_dma_intr()
{
asc_softc_t asc = asc_cd.cd_devs[0]; /*XXX*/
u_int next_phys;
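/*
 * A DMA pointer-update interrupt means one more page of the transfer
 * has been consumed: either hand the IOASIC the next page or, when
 * the transfer is exhausted, stop the engine.
 */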
asc->dma_xfer -= NBPG;
if (asc->dma_xfer <= -NBPG) {
volatile u_int *ssr = (volatile u_int *)
IOASIC_REG_CSR(ioasic_base);
*ssr &= ~IOASIC_CSR_DMAEN_SCSI;
} else {
asc->dma_next += NBPG;
next_phys = MIPS_KSEG0_TO_PHYS(asc->dma_next);
*(volatile int *)IOASIC_REG_SCSI_DMANPTR(ioasic_base) =
IOASIC_DMA_ADDR(next_phys);
}
wbflush();
}
#endif /*notdef*/